applied-ai-018 committed
Commit 07bac1a · verified · 1 Parent(s): 472de0d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step80/zero/18.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  8. ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  9. ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  10. ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  11. ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  12. ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  13. ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  14. ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  15. ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  16. venv/lib/python3.10/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/nvidia/__init__.py +0 -0
  19. venv/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/nvidia/cublas/__init__.py +0 -0
  21. venv/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py +0 -0
  23. venv/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h +891 -0
  25. venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h +1815 -0
  26. venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h +693 -0
  27. venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h +0 -0
  28. venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h +478 -0
  29. venv/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h +824 -0
  30. venv/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py +0 -0
  31. venv/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 +0 -0
  33. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py +0 -0
  34. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h +98 -0
  36. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h +100 -0
  37. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h +1083 -0
  38. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py +0 -0
  39. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h +112 -0
  41. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti.h +123 -0
  42. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h +0 -0
  43. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h +762 -0
  44. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h +127 -0
  45. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h +725 -0
  46. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h +1371 -0
  47. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h +825 -0
  48. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h +111 -0
  49. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h +950 -0
  50. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h +419 -0
ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61563ba8e0b5ecc0c60abf6de7fca13625ccdfb06c023bf6cc82be18813e84ea
+ size 33555627
ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c01a8ce004c1f19494c18155c0095b6724158c388db1c1b0c9e603aef4cab84b
+ size 33555533
ckpts/universal/global_step80/zero/18.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb70643ff3c14b921689a4af1854a26de591d09389f6f038fc225599c7b0bd73
+ size 50332843
ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96168e5fdaa384db5d1d07fc220ffd2cb3e1e75825aaff40c7f93e36e3fb1c4e
+ size 33555612
ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e700f0de5aaf5dccf3849fe7dc919146f726ca849542d38d0a3178d31ae5be7
+ size 33555627
ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:325a36d1f2f17b68ec2848a8c22aaa5dff21c97edad5b59cb3213a60632e0f1c
+ size 33555533
ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a11f8382a8ddde2efa7fc46bff2043dbee9b4a7a3c3a03a8076529faf38a7044
+ size 33555533
ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2152e1d28c10c8f684386e3e8e6044c4ce9b629aa52385e10444d41a5743f8a0
+ size 33555612
ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dcff6198ec0feb56dfb12336c56642d4958f81060f956588001452439dcfb56
+ size 33555627
ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2c77b2af1d15cd7a10c3d5a60652075756ef8bae8f49bcf8f29ad94de51797f
+ size 33555533
ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e48a024c817f04d24f3b92e5554d684e1f9b9c14ef9da38bbd941d83f330bdc
+ size 33555612
ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fef3a709d477f61ff776b305a84314b582a7b1e74d5f63e96a0065fde574ba5
+ size 33555627
ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f95c7614eeede8576548db2a6a9df5d3f770c027c118369c635a83493f28ee4
+ size 33555533
ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca41078f19aca92cf4487922d8c477a3e4224e860143a72ca8c4407c7333db2e
+ size 33555627
ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6a7611ba7695d36ade228f5458db2ab1be0060784a72d85d24b0f99f7418ae1
+ size 33555533
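Each of the checkpoint weights above is stored as a Git LFS pointer rather than the tensor data itself: a three-line text file giving the spec version, the SHA-256 object id, and the object size in bytes. A minimal sketch of reading such a pointer is shown below; the helper name and example path are illustrative only, not part of this commit.

```python
# Hypothetical helper (not part of this repository) for reading a Git LFS
# pointer file with the three-line layout shown above: version, oid, size.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")  # e.g. "size 33555533"
            fields[key] = value
    algo, _, digest = fields["oid"].partition(":")  # "sha256:<hex digest>"
    return {
        "version": fields["version"],
        "hash_algo": algo,
        "oid": digest,
        "size_bytes": int(fields["size"]),
    }

# Example (path taken from the file list above):
# info = parse_lfs_pointer("ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/fp32.pt")
# info["size_bytes"]  # -> 33555533
```

Until the LFS objects are fetched (for example with `git lfs pull`), only these small pointer files are present in the working tree.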
venv/lib/python3.10/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (11.3 kB).
venv/lib/python3.10/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc ADDED
Binary file (2 kB).
venv/lib/python3.10/site-packages/nvidia/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes).
venv/lib/python3.10/site-packages/nvidia/cublas/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes).
venv/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h ADDED
@@ -0,0 +1,891 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the CUBLAS library, defining the API
52
+ *
53
+ * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines)
54
+ * on top of the CUDA runtime.
55
+ */
56
+
57
+ #if !defined(CUBLAS_H_)
58
+ #define CUBLAS_H_
59
+
60
+ #if defined(CUBLAS_V2_H_)
61
+ #error "It is an error to include both cublas.h and cublas_v2.h"
62
+ #endif
63
+
64
+ #include <cuda_runtime.h>
65
+
66
+ #ifndef CUBLASWINAPI
67
+ #ifdef _WIN32
68
+ #define CUBLASWINAPI __stdcall
69
+ #else
70
+ #define CUBLASWINAPI
71
+ #endif
72
+ #endif
73
+
74
+ #undef CUBLASAPI
75
+ #ifdef __CUDACC__
76
+ #define CUBLASAPI __host__
77
+ #else
78
+ #define CUBLASAPI
79
+ #endif
80
+
81
+ #include "cublas_api.h"
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif
86
+
87
+ /* CUBLAS data types */
88
+ #define cublasStatus cublasStatus_t
89
+
90
+ cublasStatus CUBLASWINAPI cublasInit(void);
91
+ cublasStatus CUBLASWINAPI cublasShutdown(void);
92
+ cublasStatus CUBLASWINAPI cublasGetError(void);
93
+
94
+ cublasStatus CUBLASWINAPI cublasGetVersion(int* version);
95
+ cublasStatus CUBLASWINAPI cublasAlloc(int n, int elemSize, void** devicePtr);
96
+
97
+ cublasStatus CUBLASWINAPI cublasFree(void* devicePtr);
98
+
99
+ cublasStatus CUBLASWINAPI cublasSetKernelStream(cudaStream_t stream);
100
+
101
+ /* ---------------- CUBLAS BLAS1 functions ---------------- */
102
+ /* NRM2 */
103
+ float CUBLASWINAPI cublasSnrm2(int n, const float* x, int incx);
104
+ double CUBLASWINAPI cublasDnrm2(int n, const double* x, int incx);
105
+ float CUBLASWINAPI cublasScnrm2(int n, const cuComplex* x, int incx);
106
+ double CUBLASWINAPI cublasDznrm2(int n, const cuDoubleComplex* x, int incx);
107
+ /*------------------------------------------------------------------------*/
108
+ /* DOT */
109
+ float CUBLASWINAPI cublasSdot(int n, const float* x, int incx, const float* y, int incy);
110
+ double CUBLASWINAPI cublasDdot(int n, const double* x, int incx, const double* y, int incy);
111
+ cuComplex CUBLASWINAPI cublasCdotu(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
112
+ cuComplex CUBLASWINAPI cublasCdotc(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
113
+ cuDoubleComplex CUBLASWINAPI cublasZdotu(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
114
+ cuDoubleComplex CUBLASWINAPI cublasZdotc(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
115
+ /*------------------------------------------------------------------------*/
116
+ /* SCAL */
117
+ void CUBLASWINAPI cublasSscal(int n, float alpha, float* x, int incx);
118
+ void CUBLASWINAPI cublasDscal(int n, double alpha, double* x, int incx);
119
+ void CUBLASWINAPI cublasCscal(int n, cuComplex alpha, cuComplex* x, int incx);
120
+ void CUBLASWINAPI cublasZscal(int n, cuDoubleComplex alpha, cuDoubleComplex* x, int incx);
121
+
122
+ void CUBLASWINAPI cublasCsscal(int n, float alpha, cuComplex* x, int incx);
123
+ void CUBLASWINAPI cublasZdscal(int n, double alpha, cuDoubleComplex* x, int incx);
124
+ /*------------------------------------------------------------------------*/
125
+ /* AXPY */
126
+ void CUBLASWINAPI cublasSaxpy(int n, float alpha, const float* x, int incx, float* y, int incy);
127
+ void CUBLASWINAPI cublasDaxpy(int n, double alpha, const double* x, int incx, double* y, int incy);
128
+ void CUBLASWINAPI cublasCaxpy(int n, cuComplex alpha, const cuComplex* x, int incx, cuComplex* y, int incy);
129
+ void CUBLASWINAPI
130
+ cublasZaxpy(int n, cuDoubleComplex alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
131
+ /*------------------------------------------------------------------------*/
132
+ /* COPY */
133
+ void CUBLASWINAPI cublasScopy(int n, const float* x, int incx, float* y, int incy);
134
+ void CUBLASWINAPI cublasDcopy(int n, const double* x, int incx, double* y, int incy);
135
+ void CUBLASWINAPI cublasCcopy(int n, const cuComplex* x, int incx, cuComplex* y, int incy);
136
+ void CUBLASWINAPI cublasZcopy(int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
137
+ /*------------------------------------------------------------------------*/
138
+ /* SWAP */
139
+ void CUBLASWINAPI cublasSswap(int n, float* x, int incx, float* y, int incy);
140
+ void CUBLASWINAPI cublasDswap(int n, double* x, int incx, double* y, int incy);
141
+ void CUBLASWINAPI cublasCswap(int n, cuComplex* x, int incx, cuComplex* y, int incy);
142
+ void CUBLASWINAPI cublasZswap(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
143
+ /*------------------------------------------------------------------------*/
144
+ /* AMAX */
145
+ int CUBLASWINAPI cublasIsamax(int n, const float* x, int incx);
146
+ int CUBLASWINAPI cublasIdamax(int n, const double* x, int incx);
147
+ int CUBLASWINAPI cublasIcamax(int n, const cuComplex* x, int incx);
148
+ int CUBLASWINAPI cublasIzamax(int n, const cuDoubleComplex* x, int incx);
149
+ /*------------------------------------------------------------------------*/
150
+ /* AMIN */
151
+ int CUBLASWINAPI cublasIsamin(int n, const float* x, int incx);
152
+ int CUBLASWINAPI cublasIdamin(int n, const double* x, int incx);
153
+
154
+ int CUBLASWINAPI cublasIcamin(int n, const cuComplex* x, int incx);
155
+ int CUBLASWINAPI cublasIzamin(int n, const cuDoubleComplex* x, int incx);
156
+ /*------------------------------------------------------------------------*/
157
+ /* ASUM */
158
+ float CUBLASWINAPI cublasSasum(int n, const float* x, int incx);
159
+ double CUBLASWINAPI cublasDasum(int n, const double* x, int incx);
160
+ float CUBLASWINAPI cublasScasum(int n, const cuComplex* x, int incx);
161
+ double CUBLASWINAPI cublasDzasum(int n, const cuDoubleComplex* x, int incx);
162
+ /*------------------------------------------------------------------------*/
163
+ /* ROT */
164
+ void CUBLASWINAPI cublasSrot(int n, float* x, int incx, float* y, int incy, float sc, float ss);
165
+ void CUBLASWINAPI cublasDrot(int n, double* x, int incx, double* y, int incy, double sc, double ss);
166
+ void CUBLASWINAPI cublasCrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, cuComplex s);
167
+ void CUBLASWINAPI
168
+ cublasZrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double sc, cuDoubleComplex cs);
169
+ void CUBLASWINAPI cublasCsrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, float s);
170
+ void CUBLASWINAPI cublasZdrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double c, double s);
171
+ /*------------------------------------------------------------------------*/
172
+ /* ROTG */
173
+ void CUBLASWINAPI cublasSrotg(float* sa, float* sb, float* sc, float* ss);
174
+ void CUBLASWINAPI cublasDrotg(double* sa, double* sb, double* sc, double* ss);
175
+ void CUBLASWINAPI cublasCrotg(cuComplex* ca, cuComplex cb, float* sc, cuComplex* cs);
176
+ void CUBLASWINAPI cublasZrotg(cuDoubleComplex* ca, cuDoubleComplex cb, double* sc, cuDoubleComplex* cs);
177
+ /*------------------------------------------------------------------------*/
178
+ /* ROTM */
179
+ void CUBLASWINAPI cublasSrotm(int n, float* x, int incx, float* y, int incy, const float* sparam);
180
+ void CUBLASWINAPI cublasDrotm(int n, double* x, int incx, double* y, int incy, const double* sparam);
181
+ /*------------------------------------------------------------------------*/
182
+ /* ROTMG */
183
+ void CUBLASWINAPI cublasSrotmg(float* sd1, float* sd2, float* sx1, const float* sy1, float* sparam);
184
+ void CUBLASWINAPI cublasDrotmg(double* sd1, double* sd2, double* sx1, const double* sy1, double* sparam);
185
+
186
+ /* --------------- CUBLAS BLAS2 functions ---------------- */
187
+ /* GEMV */
188
+ void CUBLASWINAPI cublasSgemv(char trans,
189
+ int m,
190
+ int n,
191
+ float alpha,
192
+ const float* A,
193
+ int lda,
194
+ const float* x,
195
+ int incx,
196
+ float beta,
197
+ float* y,
198
+ int incy);
199
+ void CUBLASWINAPI cublasDgemv(char trans,
200
+ int m,
201
+ int n,
202
+ double alpha,
203
+ const double* A,
204
+ int lda,
205
+ const double* x,
206
+ int incx,
207
+ double beta,
208
+ double* y,
209
+ int incy);
210
+ void CUBLASWINAPI cublasCgemv(char trans,
211
+ int m,
212
+ int n,
213
+ cuComplex alpha,
214
+ const cuComplex* A,
215
+ int lda,
216
+ const cuComplex* x,
217
+ int incx,
218
+ cuComplex beta,
219
+ cuComplex* y,
220
+ int incy);
221
+ void CUBLASWINAPI cublasZgemv(char trans,
222
+ int m,
223
+ int n,
224
+ cuDoubleComplex alpha,
225
+ const cuDoubleComplex* A,
226
+ int lda,
227
+ const cuDoubleComplex* x,
228
+ int incx,
229
+ cuDoubleComplex beta,
230
+ cuDoubleComplex* y,
231
+ int incy);
232
+ /*------------------------------------------------------------------------*/
233
+ /* GBMV */
234
+ void CUBLASWINAPI cublasSgbmv(char trans,
235
+ int m,
236
+ int n,
237
+ int kl,
238
+ int ku,
239
+ float alpha,
240
+ const float* A,
241
+ int lda,
242
+ const float* x,
243
+ int incx,
244
+ float beta,
245
+ float* y,
246
+ int incy);
247
+ void CUBLASWINAPI cublasDgbmv(char trans,
248
+ int m,
249
+ int n,
250
+ int kl,
251
+ int ku,
252
+ double alpha,
253
+ const double* A,
254
+ int lda,
255
+ const double* x,
256
+ int incx,
257
+ double beta,
258
+ double* y,
259
+ int incy);
260
+ void CUBLASWINAPI cublasCgbmv(char trans,
261
+ int m,
262
+ int n,
263
+ int kl,
264
+ int ku,
265
+ cuComplex alpha,
266
+ const cuComplex* A,
267
+ int lda,
268
+ const cuComplex* x,
269
+ int incx,
270
+ cuComplex beta,
271
+ cuComplex* y,
272
+ int incy);
273
+ void CUBLASWINAPI cublasZgbmv(char trans,
274
+ int m,
275
+ int n,
276
+ int kl,
277
+ int ku,
278
+ cuDoubleComplex alpha,
279
+ const cuDoubleComplex* A,
280
+ int lda,
281
+ const cuDoubleComplex* x,
282
+ int incx,
283
+ cuDoubleComplex beta,
284
+ cuDoubleComplex* y,
285
+ int incy);
286
+ /*------------------------------------------------------------------------*/
287
+ /* TRMV */
288
+ void CUBLASWINAPI cublasStrmv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
289
+ void CUBLASWINAPI cublasDtrmv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
290
+ void CUBLASWINAPI
291
+ cublasCtrmv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
292
+ void CUBLASWINAPI
293
+ cublasZtrmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
294
+ /*------------------------------------------------------------------------*/
295
+ /* TBMV */
296
+ void CUBLASWINAPI
297
+ cublasStbmv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
298
+ void CUBLASWINAPI
299
+ cublasDtbmv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
300
+ void CUBLASWINAPI
301
+ cublasCtbmv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
302
+ void CUBLASWINAPI cublasZtbmv(
303
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
304
+ /*------------------------------------------------------------------------*/
305
+ /* TPMV */
306
+ void CUBLASWINAPI cublasStpmv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
307
+
308
+ void CUBLASWINAPI cublasDtpmv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
309
+
310
+ void CUBLASWINAPI cublasCtpmv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
311
+
312
+ void CUBLASWINAPI
313
+ cublasZtpmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
314
+ /*------------------------------------------------------------------------*/
315
+ /* TRSV */
316
+ void CUBLASWINAPI cublasStrsv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
317
+
318
+ void CUBLASWINAPI cublasDtrsv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
319
+
320
+ void CUBLASWINAPI
321
+ cublasCtrsv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
322
+
323
+ void CUBLASWINAPI
324
+ cublasZtrsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
325
+ /*------------------------------------------------------------------------*/
326
+ /* TPSV */
327
+ void CUBLASWINAPI cublasStpsv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
328
+
329
+ void CUBLASWINAPI cublasDtpsv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
330
+
331
+ void CUBLASWINAPI cublasCtpsv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
332
+
333
+ void CUBLASWINAPI
334
+ cublasZtpsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
335
+ /*------------------------------------------------------------------------*/
336
+ /* TBSV */
337
+ void CUBLASWINAPI
338
+ cublasStbsv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
339
+
340
+ void CUBLASWINAPI
341
+ cublasDtbsv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
342
+ void CUBLASWINAPI
343
+ cublasCtbsv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
344
+
345
+ void CUBLASWINAPI cublasZtbsv(
346
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
347
+ /*------------------------------------------------------------------------*/
348
+ /* SYMV/HEMV */
349
+ void CUBLASWINAPI cublasSsymv(
350
+ char uplo, int n, float alpha, const float* A, int lda, const float* x, int incx, float beta, float* y, int incy);
351
+ void CUBLASWINAPI cublasDsymv(char uplo,
352
+ int n,
353
+ double alpha,
354
+ const double* A,
355
+ int lda,
356
+ const double* x,
357
+ int incx,
358
+ double beta,
359
+ double* y,
360
+ int incy);
361
+ void CUBLASWINAPI cublasChemv(char uplo,
362
+ int n,
363
+ cuComplex alpha,
364
+ const cuComplex* A,
365
+ int lda,
366
+ const cuComplex* x,
367
+ int incx,
368
+ cuComplex beta,
369
+ cuComplex* y,
370
+ int incy);
371
+ void CUBLASWINAPI cublasZhemv(char uplo,
372
+ int n,
373
+ cuDoubleComplex alpha,
374
+ const cuDoubleComplex* A,
375
+ int lda,
376
+ const cuDoubleComplex* x,
377
+ int incx,
378
+ cuDoubleComplex beta,
379
+ cuDoubleComplex* y,
380
+ int incy);
381
+ /*------------------------------------------------------------------------*/
382
+ /* SBMV/HBMV */
383
+ void CUBLASWINAPI cublasSsbmv(char uplo,
384
+ int n,
385
+ int k,
386
+ float alpha,
387
+ const float* A,
388
+ int lda,
389
+ const float* x,
390
+ int incx,
391
+ float beta,
392
+ float* y,
393
+ int incy);
394
+ void CUBLASWINAPI cublasDsbmv(char uplo,
395
+ int n,
396
+ int k,
397
+ double alpha,
398
+ const double* A,
399
+ int lda,
400
+ const double* x,
401
+ int incx,
402
+ double beta,
403
+ double* y,
404
+ int incy);
405
+ void CUBLASWINAPI cublasChbmv(char uplo,
406
+ int n,
407
+ int k,
408
+ cuComplex alpha,
409
+ const cuComplex* A,
410
+ int lda,
411
+ const cuComplex* x,
412
+ int incx,
413
+ cuComplex beta,
414
+ cuComplex* y,
415
+ int incy);
416
+ void CUBLASWINAPI cublasZhbmv(char uplo,
417
+ int n,
418
+ int k,
419
+ cuDoubleComplex alpha,
420
+ const cuDoubleComplex* A,
421
+ int lda,
422
+ const cuDoubleComplex* x,
423
+ int incx,
424
+ cuDoubleComplex beta,
425
+ cuDoubleComplex* y,
426
+ int incy);
427
+ /*------------------------------------------------------------------------*/
428
+ /* SPMV/HPMV */
429
+ void CUBLASWINAPI
430
+ cublasSspmv(char uplo, int n, float alpha, const float* AP, const float* x, int incx, float beta, float* y, int incy);
431
+ void CUBLASWINAPI cublasDspmv(
432
+ char uplo, int n, double alpha, const double* AP, const double* x, int incx, double beta, double* y, int incy);
433
+ void CUBLASWINAPI cublasChpmv(char uplo,
434
+ int n,
435
+ cuComplex alpha,
436
+ const cuComplex* AP,
437
+ const cuComplex* x,
438
+ int incx,
439
+ cuComplex beta,
440
+ cuComplex* y,
441
+ int incy);
442
+ void CUBLASWINAPI cublasZhpmv(char uplo,
443
+ int n,
444
+ cuDoubleComplex alpha,
445
+ const cuDoubleComplex* AP,
446
+ const cuDoubleComplex* x,
447
+ int incx,
448
+ cuDoubleComplex beta,
449
+ cuDoubleComplex* y,
450
+ int incy);
451
+
452
+ /*------------------------------------------------------------------------*/
453
+ /* GER */
454
+ void CUBLASWINAPI
455
+ cublasSger(int m, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
456
+ void CUBLASWINAPI
457
+ cublasDger(int m, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
458
+
459
+ void CUBLASWINAPI cublasCgeru(
460
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
461
+ void CUBLASWINAPI cublasCgerc(
462
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
463
+ void CUBLASWINAPI cublasZgeru(int m,
464
+ int n,
465
+ cuDoubleComplex alpha,
466
+ const cuDoubleComplex* x,
467
+ int incx,
468
+ const cuDoubleComplex* y,
469
+ int incy,
470
+ cuDoubleComplex* A,
471
+ int lda);
472
+ void CUBLASWINAPI cublasZgerc(int m,
473
+ int n,
474
+ cuDoubleComplex alpha,
475
+ const cuDoubleComplex* x,
476
+ int incx,
477
+ const cuDoubleComplex* y,
478
+ int incy,
479
+ cuDoubleComplex* A,
480
+ int lda);
481
+ /*------------------------------------------------------------------------*/
482
+ /* SYR/HER */
483
+ void CUBLASWINAPI cublasSsyr(char uplo, int n, float alpha, const float* x, int incx, float* A, int lda);
484
+ void CUBLASWINAPI cublasDsyr(char uplo, int n, double alpha, const double* x, int incx, double* A, int lda);
485
+
486
+ void CUBLASWINAPI cublasCher(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* A, int lda);
487
+ void CUBLASWINAPI
488
+ cublasZher(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* A, int lda);
489
+
490
+ /*------------------------------------------------------------------------*/
491
+ /* SPR/HPR */
492
+ void CUBLASWINAPI cublasSspr(char uplo, int n, float alpha, const float* x, int incx, float* AP);
493
+ void CUBLASWINAPI cublasDspr(char uplo, int n, double alpha, const double* x, int incx, double* AP);
494
+ void CUBLASWINAPI cublasChpr(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* AP);
495
+ void CUBLASWINAPI cublasZhpr(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* AP);
496
+ /*------------------------------------------------------------------------*/
497
+ /* SYR2/HER2 */
498
+ void CUBLASWINAPI
499
+ cublasSsyr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
500
+ void CUBLASWINAPI
501
+ cublasDsyr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
502
+ void CUBLASWINAPI cublasCher2(char uplo,
503
+ int n,
504
+ cuComplex alpha,
505
+ const cuComplex* x,
506
+ int incx,
507
+ const cuComplex* y,
508
+ int incy,
509
+ cuComplex* A,
510
+ int lda);
511
+ void CUBLASWINAPI cublasZher2(char uplo,
512
+ int n,
513
+ cuDoubleComplex alpha,
514
+ const cuDoubleComplex* x,
515
+ int incx,
516
+ const cuDoubleComplex* y,
517
+ int incy,
518
+ cuDoubleComplex* A,
519
+ int lda);
520
+
521
+ /*------------------------------------------------------------------------*/
522
+ /* SPR2/HPR2 */
523
+ void CUBLASWINAPI
524
+ cublasSspr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* AP);
525
+ void CUBLASWINAPI
526
+ cublasDspr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* AP);
527
+ void CUBLASWINAPI cublasChpr2(
528
+ char uplo, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* AP);
529
+ void CUBLASWINAPI cublasZhpr2(char uplo,
530
+ int n,
531
+ cuDoubleComplex alpha,
532
+ const cuDoubleComplex* x,
533
+ int incx,
534
+ const cuDoubleComplex* y,
535
+ int incy,
536
+ cuDoubleComplex* AP);
537
+ /* ------------------------BLAS3 Functions ------------------------------- */
538
+ /* GEMM */
539
+ void CUBLASWINAPI cublasSgemm(char transa,
540
+ char transb,
541
+ int m,
542
+ int n,
543
+ int k,
544
+ float alpha,
545
+ const float* A,
546
+ int lda,
547
+ const float* B,
548
+ int ldb,
549
+ float beta,
550
+ float* C,
551
+ int ldc);
552
+ void CUBLASWINAPI cublasDgemm(char transa,
553
+ char transb,
554
+ int m,
555
+ int n,
556
+ int k,
557
+ double alpha,
558
+ const double* A,
559
+ int lda,
560
+ const double* B,
561
+ int ldb,
562
+ double beta,
563
+ double* C,
564
+ int ldc);
565
+ void CUBLASWINAPI cublasCgemm(char transa,
566
+ char transb,
567
+ int m,
568
+ int n,
569
+ int k,
570
+ cuComplex alpha,
571
+ const cuComplex* A,
572
+ int lda,
573
+ const cuComplex* B,
574
+ int ldb,
575
+ cuComplex beta,
576
+ cuComplex* C,
577
+ int ldc);
578
+ void CUBLASWINAPI cublasZgemm(char transa,
579
+ char transb,
580
+ int m,
581
+ int n,
582
+ int k,
583
+ cuDoubleComplex alpha,
584
+ const cuDoubleComplex* A,
585
+ int lda,
586
+ const cuDoubleComplex* B,
587
+ int ldb,
588
+ cuDoubleComplex beta,
589
+ cuDoubleComplex* C,
590
+ int ldc);
591
+ /* -------------------------------------------------------*/
592
+ /* SYRK */
593
+ void CUBLASWINAPI
594
+ cublasSsyrk(char uplo, char trans, int n, int k, float alpha, const float* A, int lda, float beta, float* C, int ldc);
595
+ void CUBLASWINAPI cublasDsyrk(
596
+ char uplo, char trans, int n, int k, double alpha, const double* A, int lda, double beta, double* C, int ldc);
597
+
598
+ void CUBLASWINAPI cublasCsyrk(char uplo,
599
+ char trans,
600
+ int n,
601
+ int k,
602
+ cuComplex alpha,
603
+ const cuComplex* A,
604
+ int lda,
605
+ cuComplex beta,
606
+ cuComplex* C,
607
+ int ldc);
608
+ void CUBLASWINAPI cublasZsyrk(char uplo,
609
+ char trans,
610
+ int n,
611
+ int k,
612
+ cuDoubleComplex alpha,
613
+ const cuDoubleComplex* A,
614
+ int lda,
615
+ cuDoubleComplex beta,
616
+ cuDoubleComplex* C,
617
+ int ldc);
618
+ /* ------------------------------------------------------- */
619
+ /* HERK */
620
+ void CUBLASWINAPI cublasCherk(
621
+ char uplo, char trans, int n, int k, float alpha, const cuComplex* A, int lda, float beta, cuComplex* C, int ldc);
622
+ void CUBLASWINAPI cublasZherk(char uplo,
623
+ char trans,
624
+ int n,
625
+ int k,
626
+ double alpha,
627
+ const cuDoubleComplex* A,
628
+ int lda,
629
+ double beta,
630
+ cuDoubleComplex* C,
631
+ int ldc);
632
+ /* ------------------------------------------------------- */
633
+ /* SYR2K */
634
+ void CUBLASWINAPI cublasSsyr2k(char uplo,
635
+ char trans,
636
+ int n,
637
+ int k,
638
+ float alpha,
639
+ const float* A,
640
+ int lda,
641
+ const float* B,
642
+ int ldb,
643
+ float beta,
644
+ float* C,
645
+ int ldc);
646
+
647
+ void CUBLASWINAPI cublasDsyr2k(char uplo,
648
+ char trans,
649
+ int n,
650
+ int k,
651
+ double alpha,
652
+ const double* A,
653
+ int lda,
654
+ const double* B,
655
+ int ldb,
656
+ double beta,
657
+ double* C,
658
+ int ldc);
659
+ void CUBLASWINAPI cublasCsyr2k(char uplo,
660
+ char trans,
661
+ int n,
662
+ int k,
663
+ cuComplex alpha,
664
+ const cuComplex* A,
665
+ int lda,
666
+ const cuComplex* B,
667
+ int ldb,
668
+ cuComplex beta,
669
+ cuComplex* C,
670
+ int ldc);
671
+
672
+ void CUBLASWINAPI cublasZsyr2k(char uplo,
673
+ char trans,
674
+ int n,
675
+ int k,
676
+ cuDoubleComplex alpha,
677
+ const cuDoubleComplex* A,
678
+ int lda,
679
+ const cuDoubleComplex* B,
680
+ int ldb,
681
+ cuDoubleComplex beta,
682
+ cuDoubleComplex* C,
683
+ int ldc);
684
+ /* ------------------------------------------------------- */
685
+ /* HER2K */
686
+ void CUBLASWINAPI cublasCher2k(char uplo,
687
+ char trans,
688
+ int n,
689
+ int k,
690
+ cuComplex alpha,
691
+ const cuComplex* A,
692
+ int lda,
693
+ const cuComplex* B,
694
+ int ldb,
695
+ float beta,
696
+ cuComplex* C,
697
+ int ldc);
698
+
699
+ void CUBLASWINAPI cublasZher2k(char uplo,
700
+ char trans,
701
+ int n,
702
+ int k,
703
+ cuDoubleComplex alpha,
704
+ const cuDoubleComplex* A,
705
+ int lda,
706
+ const cuDoubleComplex* B,
707
+ int ldb,
708
+ double beta,
709
+ cuDoubleComplex* C,
710
+ int ldc);
711
+
712
+ /*------------------------------------------------------------------------*/
713
+ /* SYMM*/
714
+ void CUBLASWINAPI cublasSsymm(char side,
715
+ char uplo,
716
+ int m,
717
+ int n,
718
+ float alpha,
719
+ const float* A,
720
+ int lda,
721
+ const float* B,
722
+ int ldb,
723
+ float beta,
724
+ float* C,
725
+ int ldc);
726
+ void CUBLASWINAPI cublasDsymm(char side,
727
+ char uplo,
728
+ int m,
729
+ int n,
730
+ double alpha,
731
+ const double* A,
732
+ int lda,
733
+ const double* B,
734
+ int ldb,
735
+ double beta,
736
+ double* C,
737
+ int ldc);
738
+
739
+ void CUBLASWINAPI cublasCsymm(char side,
740
+ char uplo,
741
+ int m,
742
+ int n,
743
+ cuComplex alpha,
744
+ const cuComplex* A,
745
+ int lda,
746
+ const cuComplex* B,
747
+ int ldb,
748
+ cuComplex beta,
749
+ cuComplex* C,
750
+ int ldc);
751
+
752
+ void CUBLASWINAPI cublasZsymm(char side,
753
+ char uplo,
754
+ int m,
755
+ int n,
756
+ cuDoubleComplex alpha,
757
+ const cuDoubleComplex* A,
758
+ int lda,
759
+ const cuDoubleComplex* B,
760
+ int ldb,
761
+ cuDoubleComplex beta,
762
+ cuDoubleComplex* C,
763
+ int ldc);
764
+ /*------------------------------------------------------------------------*/
765
+ /* HEMM*/
766
+ void CUBLASWINAPI cublasChemm(char side,
767
+ char uplo,
768
+ int m,
769
+ int n,
770
+ cuComplex alpha,
771
+ const cuComplex* A,
772
+ int lda,
773
+ const cuComplex* B,
774
+ int ldb,
775
+ cuComplex beta,
776
+ cuComplex* C,
777
+ int ldc);
778
+ void CUBLASWINAPI cublasZhemm(char side,
779
+ char uplo,
780
+ int m,
781
+ int n,
782
+ cuDoubleComplex alpha,
783
+ const cuDoubleComplex* A,
784
+ int lda,
785
+ const cuDoubleComplex* B,
786
+ int ldb,
787
+ cuDoubleComplex beta,
788
+ cuDoubleComplex* C,
789
+ int ldc);
790
+
791
+ /*------------------------------------------------------------------------*/
792
+ /* TRSM*/
793
+ void CUBLASWINAPI cublasStrsm(char side,
794
+ char uplo,
795
+ char transa,
796
+ char diag,
797
+ int m,
798
+ int n,
799
+ float alpha,
800
+ const float* A,
801
+ int lda,
802
+ float* B,
803
+ int ldb);
804
+
805
+ void CUBLASWINAPI cublasDtrsm(char side,
806
+ char uplo,
807
+ char transa,
808
+ char diag,
809
+ int m,
810
+ int n,
811
+ double alpha,
812
+ const double* A,
813
+ int lda,
814
+ double* B,
815
+ int ldb);
816
+
817
+ void CUBLASWINAPI cublasCtrsm(char side,
818
+ char uplo,
819
+ char transa,
820
+ char diag,
821
+ int m,
822
+ int n,
823
+ cuComplex alpha,
824
+ const cuComplex* A,
825
+ int lda,
826
+ cuComplex* B,
827
+ int ldb);
828
+
829
+ void CUBLASWINAPI cublasZtrsm(char side,
830
+ char uplo,
831
+ char transa,
832
+ char diag,
833
+ int m,
834
+ int n,
835
+ cuDoubleComplex alpha,
836
+ const cuDoubleComplex* A,
837
+ int lda,
838
+ cuDoubleComplex* B,
839
+ int ldb);
840
+ /*------------------------------------------------------------------------*/
841
+ /* TRMM*/
842
+ void CUBLASWINAPI cublasStrmm(char side,
843
+ char uplo,
844
+ char transa,
845
+ char diag,
846
+ int m,
847
+ int n,
848
+ float alpha,
849
+ const float* A,
850
+ int lda,
851
+ float* B,
852
+ int ldb);
853
+ void CUBLASWINAPI cublasDtrmm(char side,
854
+ char uplo,
855
+ char transa,
856
+ char diag,
857
+ int m,
858
+ int n,
859
+ double alpha,
860
+ const double* A,
861
+ int lda,
862
+ double* B,
863
+ int ldb);
864
+ void CUBLASWINAPI cublasCtrmm(char side,
865
+ char uplo,
866
+ char transa,
867
+ char diag,
868
+ int m,
869
+ int n,
870
+ cuComplex alpha,
871
+ const cuComplex* A,
872
+ int lda,
873
+ cuComplex* B,
874
+ int ldb);
875
+ void CUBLASWINAPI cublasZtrmm(char side,
876
+ char uplo,
877
+ char transa,
878
+ char diag,
879
+ int m,
880
+ int n,
881
+ cuDoubleComplex alpha,
882
+ const cuDoubleComplex* A,
883
+ int lda,
884
+ cuDoubleComplex* B,
885
+ int ldb);
886
+
887
+ #if defined(__cplusplus)
888
+ }
889
+ #endif /* __cplusplus */
890
+
891
+ #endif /* !defined(CUBLAS_H_) */
venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h ADDED
@@ -0,0 +1,1815 @@
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+ #pragma once
50
+
51
+ #ifndef CUBLASAPI
52
+ #ifdef __CUDACC__
53
+ #define CUBLASAPI __host__ __device__
54
+ #else
55
+ #define CUBLASAPI
56
+ #endif
57
+ #endif
58
+
59
+ #include <cublas_api.h>
60
+
61
+ #include <stdint.h>
62
+ #include <stddef.h>
63
+ #include <stdio.h>
64
+
65
+ #if defined(__cplusplus)
66
+ extern "C" {
67
+ #endif /* __cplusplus */
68
+
69
+ /** Opaque structure holding CUBLASLT context
70
+ */
71
+ typedef struct cublasLtContext* cublasLtHandle_t;
72
+
73
+ cublasStatus_t CUBLASWINAPI cublasLtCreate(cublasLtHandle_t* lightHandle);
74
+
75
+ cublasStatus_t CUBLASWINAPI cublasLtDestroy(cublasLtHandle_t lightHandle);
76
+
77
+ const char* CUBLASWINAPI cublasLtGetStatusName(cublasStatus_t status);
78
+
79
+ const char* CUBLASWINAPI cublasLtGetStatusString(cublasStatus_t status);
80
+
81
+ size_t CUBLASWINAPI cublasLtGetVersion(void);
82
+
83
+ size_t CUBLASWINAPI cublasLtGetCudartVersion(void);
84
+
85
+ cublasStatus_t CUBLASWINAPI cublasLtGetProperty(libraryPropertyType type, int* value);
86
+
87
+ cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheGetCapacity(size_t* capacity);
88
+ cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheSetCapacity(size_t capacity);
89
+
90
+ /** Restricts usage of CPU instructions (ISA) specified by the flags in the mask.
91
+ *
92
+ * Flags can be combined with bitwise OR(|) operator. Supported flags:
93
+ * - 0x1 -- x86-64 AVX512 ISA
94
+ *
95
+ * Default mask: 0 (any applicable ISA is allowed).
96
+ *
97
+ * The function returns the previous value of the mask.
98
+ * The function takes precedence over the environment variable CUBLASLT_DISABLE_CPU_INSTRUCTIONS_MASK.
99
+ */
100
+ unsigned CUBLASWINAPI cublasLtDisableCpuInstructionsSetMask(unsigned mask);
101
+
102
+ /** Semi-opaque descriptor for matrix memory layout
103
+ */
104
+ typedef struct {
105
+ uint64_t data[8];
106
+ } cublasLtMatrixLayoutOpaque_t;
107
+
108
+ /** Opaque descriptor for matrix memory layout
109
+ */
110
+ typedef cublasLtMatrixLayoutOpaque_t* cublasLtMatrixLayout_t;
111
+
112
+ /** Semi-opaque algorithm descriptor (to avoid complicated alloc/free schemes)
113
+ *
114
+ * This structure can be trivially serialized and later restored for use with the same version of cuBLAS library to save
115
+ * on selecting the right configuration again.
116
+ */
117
+ typedef struct {
118
+ uint64_t data[8];
119
+ } cublasLtMatmulAlgo_t;
120
+
121
+ /** Semi-opaque descriptor for cublasLtMatmul() operation details
122
+ */
123
+ typedef struct {
124
+ uint64_t data[23];
125
+ } cublasLtMatmulDescOpaque_t;
126
+
127
+ /** Opaque descriptor for cublasLtMatmul() operation details
128
+ */
129
+ typedef cublasLtMatmulDescOpaque_t* cublasLtMatmulDesc_t;
130
+
131
+ /** Semi-opaque descriptor for cublasLtMatrixTransform() operation details
132
+ */
133
+ typedef struct {
134
+ uint64_t data[8];
135
+ } cublasLtMatrixTransformDescOpaque_t;
136
+
137
+ /** Opaque descriptor for cublasLtMatrixTransform() operation details
138
+ */
139
+ typedef cublasLtMatrixTransformDescOpaque_t* cublasLtMatrixTransformDesc_t;
140
+
141
+ /** Semi-opaque descriptor for cublasLtMatmulPreference() operation details
142
+ */
143
+ typedef struct {
144
+ uint64_t data[8];
145
+ } cublasLtMatmulPreferenceOpaque_t;
146
+
147
+ /** Opaque descriptor for cublasLtMatmulAlgoGetHeuristic() configuration
148
+ */
149
+ typedef cublasLtMatmulPreferenceOpaque_t* cublasLtMatmulPreference_t;
150
+
151
+ /** Tile size (in C/D matrix Rows x Cols)
152
+ *
153
+ * General order of tile IDs is sorted by size first and by first dimension second.
154
+ */
155
+ typedef enum {
156
+ CUBLASLT_MATMUL_TILE_UNDEFINED = 0,
157
+ CUBLASLT_MATMUL_TILE_8x8 = 1,
158
+ CUBLASLT_MATMUL_TILE_8x16 = 2,
159
+ CUBLASLT_MATMUL_TILE_16x8 = 3,
160
+ CUBLASLT_MATMUL_TILE_8x32 = 4,
161
+ CUBLASLT_MATMUL_TILE_16x16 = 5,
162
+ CUBLASLT_MATMUL_TILE_32x8 = 6,
163
+ CUBLASLT_MATMUL_TILE_8x64 = 7,
164
+ CUBLASLT_MATMUL_TILE_16x32 = 8,
165
+ CUBLASLT_MATMUL_TILE_32x16 = 9,
166
+ CUBLASLT_MATMUL_TILE_64x8 = 10,
167
+ CUBLASLT_MATMUL_TILE_32x32 = 11,
168
+ CUBLASLT_MATMUL_TILE_32x64 = 12,
169
+ CUBLASLT_MATMUL_TILE_64x32 = 13,
170
+ CUBLASLT_MATMUL_TILE_32x128 = 14,
171
+ CUBLASLT_MATMUL_TILE_64x64 = 15,
172
+ CUBLASLT_MATMUL_TILE_128x32 = 16,
173
+ CUBLASLT_MATMUL_TILE_64x128 = 17,
174
+ CUBLASLT_MATMUL_TILE_128x64 = 18,
175
+ CUBLASLT_MATMUL_TILE_64x256 = 19,
176
+ CUBLASLT_MATMUL_TILE_128x128 = 20,
177
+ CUBLASLT_MATMUL_TILE_256x64 = 21,
178
+ CUBLASLT_MATMUL_TILE_64x512 = 22,
179
+ CUBLASLT_MATMUL_TILE_128x256 = 23,
180
+ CUBLASLT_MATMUL_TILE_256x128 = 24,
181
+ CUBLASLT_MATMUL_TILE_512x64 = 25,
182
+ CUBLASLT_MATMUL_TILE_64x96 = 26,
183
+ CUBLASLT_MATMUL_TILE_96x64 = 27,
184
+ CUBLASLT_MATMUL_TILE_96x128 = 28,
185
+ CUBLASLT_MATMUL_TILE_128x160 = 29,
186
+ CUBLASLT_MATMUL_TILE_160x128 = 30,
187
+ CUBLASLT_MATMUL_TILE_192x128 = 31,
188
+ CUBLASLT_MATMUL_TILE_128x192 = 32,
189
+ CUBLASLT_MATMUL_TILE_128x96 = 33,
190
+ CUBLASLT_MATMUL_TILE_32x256 = 34,
191
+ CUBLASLT_MATMUL_TILE_256x32 = 35,
192
+ CUBLASLT_MATMUL_TILE_END
193
+ } cublasLtMatmulTile_t;
194
+
195
+ /** Size and number of stages in which elements are read into shared memory
196
+ *
197
+ * General order of stages IDs is sorted by stage size first and by number of stages second.
198
+ */
199
+ typedef enum {
200
+ CUBLASLT_MATMUL_STAGES_UNDEFINED = 0,
201
+ CUBLASLT_MATMUL_STAGES_16x1 = 1,
202
+ CUBLASLT_MATMUL_STAGES_16x2 = 2,
203
+ CUBLASLT_MATMUL_STAGES_16x3 = 3,
204
+ CUBLASLT_MATMUL_STAGES_16x4 = 4,
205
+ CUBLASLT_MATMUL_STAGES_16x5 = 5,
206
+ CUBLASLT_MATMUL_STAGES_16x6 = 6,
207
+ CUBLASLT_MATMUL_STAGES_32x1 = 7,
208
+ CUBLASLT_MATMUL_STAGES_32x2 = 8,
209
+ CUBLASLT_MATMUL_STAGES_32x3 = 9,
210
+ CUBLASLT_MATMUL_STAGES_32x4 = 10,
211
+ CUBLASLT_MATMUL_STAGES_32x5 = 11,
212
+ CUBLASLT_MATMUL_STAGES_32x6 = 12,
213
+ CUBLASLT_MATMUL_STAGES_64x1 = 13,
214
+ CUBLASLT_MATMUL_STAGES_64x2 = 14,
215
+ CUBLASLT_MATMUL_STAGES_64x3 = 15,
216
+ CUBLASLT_MATMUL_STAGES_64x4 = 16,
217
+ CUBLASLT_MATMUL_STAGES_64x5 = 17,
218
+ CUBLASLT_MATMUL_STAGES_64x6 = 18,
219
+ CUBLASLT_MATMUL_STAGES_128x1 = 19,
220
+ CUBLASLT_MATMUL_STAGES_128x2 = 20,
221
+ CUBLASLT_MATMUL_STAGES_128x3 = 21,
222
+ CUBLASLT_MATMUL_STAGES_128x4 = 22,
223
+ CUBLASLT_MATMUL_STAGES_128x5 = 23,
224
+ CUBLASLT_MATMUL_STAGES_128x6 = 24,
225
+ CUBLASLT_MATMUL_STAGES_32x10 = 25,
226
+ CUBLASLT_MATMUL_STAGES_8x4 = 26,
227
+ CUBLASLT_MATMUL_STAGES_16x10 = 27,
228
+ CUBLASLT_MATMUL_STAGES_8x5 = 28,
229
+ CUBLASLT_MATMUL_STAGES_8x3 = 31,
230
+ CUBLASLT_MATMUL_STAGES_8xAUTO = 32,
231
+ CUBLASLT_MATMUL_STAGES_16xAUTO = 33,
232
+ CUBLASLT_MATMUL_STAGES_32xAUTO = 34,
233
+ CUBLASLT_MATMUL_STAGES_64xAUTO = 35,
234
+ CUBLASLT_MATMUL_STAGES_128xAUTO = 36,
235
+ CUBLASLT_MATMUL_STAGES_END
236
+ } cublasLtMatmulStages_t;
237
+
238
+ /** Thread Block Cluster size
239
+ *
240
+ * Typically dimensioned similar to cublasLtMatmulTile_t, with the third coordinate unused at this time.
241
+ */
242
+ typedef enum {
243
+ /** Let library pick cluster shape automatically */
244
+ CUBLASLT_CLUSTER_SHAPE_AUTO = 0,
245
+ CUBLASLT_CLUSTER_SHAPE_1x1x1 = 2,
246
+ CUBLASLT_CLUSTER_SHAPE_2x1x1 = 3,
247
+ CUBLASLT_CLUSTER_SHAPE_4x1x1 = 4,
248
+ CUBLASLT_CLUSTER_SHAPE_1x2x1 = 5,
249
+ CUBLASLT_CLUSTER_SHAPE_2x2x1 = 6,
250
+ CUBLASLT_CLUSTER_SHAPE_4x2x1 = 7,
251
+ CUBLASLT_CLUSTER_SHAPE_1x4x1 = 8,
252
+ CUBLASLT_CLUSTER_SHAPE_2x4x1 = 9,
253
+ CUBLASLT_CLUSTER_SHAPE_4x4x1 = 10,
254
+ CUBLASLT_CLUSTER_SHAPE_8x1x1 = 11,
255
+ CUBLASLT_CLUSTER_SHAPE_1x8x1 = 12,
256
+ CUBLASLT_CLUSTER_SHAPE_8x2x1 = 13,
257
+ CUBLASLT_CLUSTER_SHAPE_2x8x1 = 14,
258
+ CUBLASLT_CLUSTER_SHAPE_16x1x1 = 15,
259
+ CUBLASLT_CLUSTER_SHAPE_1x16x1 = 16,
260
+ CUBLASLT_CLUSTER_SHAPE_3x1x1 = 17,
261
+ CUBLASLT_CLUSTER_SHAPE_5x1x1 = 18,
262
+ CUBLASLT_CLUSTER_SHAPE_6x1x1 = 19,
263
+ CUBLASLT_CLUSTER_SHAPE_7x1x1 = 20,
264
+ CUBLASLT_CLUSTER_SHAPE_9x1x1 = 21,
265
+ CUBLASLT_CLUSTER_SHAPE_10x1x1 = 22,
266
+ CUBLASLT_CLUSTER_SHAPE_11x1x1 = 23,
267
+ CUBLASLT_CLUSTER_SHAPE_12x1x1 = 24,
268
+ CUBLASLT_CLUSTER_SHAPE_13x1x1 = 25,
269
+ CUBLASLT_CLUSTER_SHAPE_14x1x1 = 26,
270
+ CUBLASLT_CLUSTER_SHAPE_15x1x1 = 27,
271
+ CUBLASLT_CLUSTER_SHAPE_3x2x1 = 28,
272
+ CUBLASLT_CLUSTER_SHAPE_5x2x1 = 29,
273
+ CUBLASLT_CLUSTER_SHAPE_6x2x1 = 30,
274
+ CUBLASLT_CLUSTER_SHAPE_7x2x1 = 31,
275
+ CUBLASLT_CLUSTER_SHAPE_1x3x1 = 32,
276
+ CUBLASLT_CLUSTER_SHAPE_2x3x1 = 33,
277
+ CUBLASLT_CLUSTER_SHAPE_3x3x1 = 34,
278
+ CUBLASLT_CLUSTER_SHAPE_4x3x1 = 35,
279
+ CUBLASLT_CLUSTER_SHAPE_5x3x1 = 36,
280
+ CUBLASLT_CLUSTER_SHAPE_3x4x1 = 37,
281
+ CUBLASLT_CLUSTER_SHAPE_1x5x1 = 38,
282
+ CUBLASLT_CLUSTER_SHAPE_2x5x1 = 39,
283
+ CUBLASLT_CLUSTER_SHAPE_3x5x1 = 40,
284
+ CUBLASLT_CLUSTER_SHAPE_1x6x1 = 41,
285
+ CUBLASLT_CLUSTER_SHAPE_2x6x1 = 42,
286
+ CUBLASLT_CLUSTER_SHAPE_1x7x1 = 43,
287
+ CUBLASLT_CLUSTER_SHAPE_2x7x1 = 44,
288
+ CUBLASLT_CLUSTER_SHAPE_1x9x1 = 45,
289
+ CUBLASLT_CLUSTER_SHAPE_1x10x1 = 46,
290
+ CUBLASLT_CLUSTER_SHAPE_1x11x1 = 47,
291
+ CUBLASLT_CLUSTER_SHAPE_1x12x1 = 48,
292
+ CUBLASLT_CLUSTER_SHAPE_1x13x1 = 49,
293
+ CUBLASLT_CLUSTER_SHAPE_1x14x1 = 50,
294
+ CUBLASLT_CLUSTER_SHAPE_1x15x1 = 51,
295
+ CUBLASLT_CLUSTER_SHAPE_END
296
+ } cublasLtClusterShape_t;
297
+
298
+ /** Inner size of the kernel
299
+ *
300
+ * Represents various aspects of internal kernel design that don't impact CUDA grid size but may have other, more subtle
301
+ * effects.
302
+ *
303
+ */
304
+ typedef enum {
305
+ CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED = 0,
306
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA884 = 1,
307
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA1684 = 2,
308
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA1688 = 3,
309
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA16816 = 4,
310
+ CUBLASLT_MATMUL_INNER_SHAPE_END
311
+ } cublasLtMatmulInnerShape_t;
312
+
313
+ /** Pointer mode to use for alpha/beta */
314
+ typedef enum {
315
+ /** matches CUBLAS_POINTER_MODE_HOST, pointer targets a single value host memory */
316
+ CUBLASLT_POINTER_MODE_HOST = CUBLAS_POINTER_MODE_HOST,
317
+ /** matches CUBLAS_POINTER_MODE_DEVICE, pointer targets a single value device memory */
318
+ CUBLASLT_POINTER_MODE_DEVICE = CUBLAS_POINTER_MODE_DEVICE,
319
+ /** pointer targets an array in device memory */
320
+ CUBLASLT_POINTER_MODE_DEVICE_VECTOR = 2,
321
+ /** alpha pointer targets an array in device memory, beta is zero. Note:
322
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE is not supported, must be 0. */
323
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO = 3,
324
+ /** alpha pointer targets an array in device memory, beta is a single value in host memory. */
325
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST = 4,
326
+ } cublasLtPointerMode_t;
327
+
328
+ /** Mask to define pointer mode capability */
329
+ typedef enum {
330
+ /** see CUBLASLT_POINTER_MODE_HOST */
331
+ CUBLASLT_POINTER_MODE_MASK_HOST = 1,
332
+ /** see CUBLASLT_POINTER_MODE_DEVICE */
333
+ CUBLASLT_POINTER_MODE_MASK_DEVICE = 2,
334
+ /** see CUBLASLT_POINTER_MODE_DEVICE_VECTOR */
335
+ CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR = 4,
336
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO */
337
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_ZERO = 8,
338
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST */
339
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_HOST = 16,
340
+ } cublasLtPointerModeMask_t;
341
+
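A small caller-side sketch (added for illustration, not part of the original header): the pointer-mode mask above is a bitmask, so support for a particular mode is checked by AND-ing against the corresponding mask bit. The mask value would typically come from the CUBLASLT_ALGO_CAP_POINTER_MODE_MASK capability defined further below.

static int supports_device_vector_alpha_beta(uint32_t pointerModeMask) {
  /* non-zero when the algorithm advertises CUBLASLT_POINTER_MODE_DEVICE_VECTOR support */
  return (pointerModeMask & CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR) != 0;
}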
342
+ /** Implementation details that may affect numerical behavior of algorithms. */
343
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA (0x01ull << 0)
344
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_HMMA (0x02ull << 0)
345
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_IMMA (0x04ull << 0)
346
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_DMMA (0x08ull << 0)
347
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_TENSOR_OP_MASK (0xfeull << 0)
348
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_TYPE_MASK (0xffull << 0)
349
+
350
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_16F (0x01ull << 8)
351
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F (0x02ull << 8)
352
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_64F (0x04ull << 8)
353
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32I (0x08ull << 8)
354
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_TYPE_MASK (0xffull << 8)
355
+
356
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16F (0x01ull << 16)
357
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16BF (0x02ull << 16)
358
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_TF32 (0x04ull << 16)
359
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F (0x08ull << 16)
360
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_64F (0x10ull << 16)
361
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8I (0x20ull << 16)
362
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E4M3 (0x40ull << 16)
363
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E5M2 (0x80ull << 16)
364
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_INPUT_TYPE_MASK (0xffull << 16)
365
+
366
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN (0x01ull << 32)
367
+ typedef uint64_t cublasLtNumericalImplFlags_t;
368
+
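Illustrative sketch (not part of the original header): the implementation flags compose into a 64-bit mask, so a caller can start from the permissive default and clear the bits it wants to exclude; such a mask is intended to be passed later through the CUBLASLT_MATMUL_PREF_IMPL_MASK preference attribute defined further below.

static cublasLtNumericalImplFlags_t impl_mask_without_gaussian(void) {
  cublasLtNumericalImplFlags_t mask = ~(cublasLtNumericalImplFlags_t)0; /* allow everything */
  mask &= ~CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN;                      /* ...except Gaussian-arithmetic kernels */
  return mask;
}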
369
+ /** Execute matrix multiplication (D = alpha * op(A) * op(B) + beta * C).
370
+ *
371
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
372
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
373
+ * when workspaceSizeInBytes is less than workspace required by configured
374
+ * algo
375
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
376
+ * operation
377
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
378
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
379
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
380
+ */
381
+ cublasStatus_t CUBLASWINAPI cublasLtMatmul(cublasLtHandle_t lightHandle,
382
+ cublasLtMatmulDesc_t computeDesc,
383
+ const void* alpha, /* host or device pointer */
384
+ const void* A,
385
+ cublasLtMatrixLayout_t Adesc,
386
+ const void* B,
387
+ cublasLtMatrixLayout_t Bdesc,
388
+ const void* beta, /* host or device pointer */
389
+ const void* C,
390
+ cublasLtMatrixLayout_t Cdesc,
391
+ void* D,
392
+ cublasLtMatrixLayout_t Ddesc,
393
+ const cublasLtMatmulAlgo_t* algo,
394
+ void* workspace,
395
+ size_t workspaceSizeInBytes,
396
+ cudaStream_t stream);
397
+
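Editorial sketch of a minimal caller (not part of the original header): a single-precision, column-major GEMM with the library choosing the algorithm (algo == NULL). Matrix pointers, sizes, and the workspace are hypothetical placeholders and error handling is omitted; a real caller should check every returned status.

#include <cublasLt.h>

static void lt_sgemm_sketch(cublasLtHandle_t handle, int m, int n, int k,
                            const float* A, const float* B, float* D,
                            void* workspace, size_t workspaceSize, cudaStream_t stream) {
  cublasLtMatmulDesc_t op = NULL;
  cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Ddesc = NULL;
  float alpha = 1.0f, beta = 0.0f;

  cublasLtMatmulDescCreate(&op, CUBLAS_COMPUTE_32F, CUDA_R_32F);
  cublasLtMatrixLayoutCreate(&Adesc, CUDA_R_32F, m, k, m); /* column-major, ld == rows */
  cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_32F, k, n, k);
  cublasLtMatrixLayoutCreate(&Ddesc, CUDA_R_32F, m, n, m);

  /* algo == NULL lets the library pick internally; with beta == 0, C simply aliases D */
  cublasLtMatmul(handle, op, &alpha, A, Adesc, B, Bdesc, &beta,
                 D, Ddesc, D, Ddesc, NULL, workspace, workspaceSize, stream);

  cublasLtMatrixLayoutDestroy(Adesc);
  cublasLtMatrixLayoutDestroy(Bdesc);
  cublasLtMatrixLayoutDestroy(Ddesc);
  cublasLtMatmulDescDestroy(op);
}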
398
+ /** Matrix layout conversion helper (C = alpha * op(A) + beta * op(B))
399
+ *
400
+ * Can be used to change memory order of data or to scale and shift the values.
401
+ *
402
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
403
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
404
+ * when A is not NULL, but Adesc is NULL
405
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
406
+ * operation
407
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
408
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
409
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
410
+ */
411
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransform(cublasLtHandle_t lightHandle,
412
+ cublasLtMatrixTransformDesc_t transformDesc,
413
+ const void* alpha, /* host or device pointer */
414
+ const void* A,
415
+ cublasLtMatrixLayout_t Adesc,
416
+ const void* beta, /* host or device pointer */
417
+ const void* B,
418
+ cublasLtMatrixLayout_t Bdesc,
419
+ void* C,
420
+ cublasLtMatrixLayout_t Cdesc,
421
+ cudaStream_t stream);
422
+
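Illustrative sketch (not part of the original header) of using the transform above to copy a row-major float matrix into a column-major one; it relies on the CUBLASLT_MATRIX_LAYOUT_ORDER attribute and cublasLtOrder_t introduced just below. Passing B as NULL when beta == 0 is an assumption of this sketch; dimensions are placeholders and error handling is omitted.

static void row_to_col_sketch(cublasLtHandle_t handle, int rows, int cols,
                              const float* src, float* dst, cudaStream_t stream) {
  cublasLtMatrixTransformDesc_t tdesc = NULL;
  cublasLtMatrixLayout_t srcLayout = NULL, dstLayout = NULL;
  const int32_t rowOrder = CUBLASLT_ORDER_ROW;
  float alpha = 1.0f, beta = 0.0f;

  cublasLtMatrixTransformDescCreate(&tdesc, CUDA_R_32F);
  cublasLtMatrixLayoutCreate(&srcLayout, CUDA_R_32F, rows, cols, cols); /* row-major: ld == cols */
  cublasLtMatrixLayoutSetAttribute(srcLayout, CUBLASLT_MATRIX_LAYOUT_ORDER, &rowOrder, sizeof(rowOrder));
  cublasLtMatrixLayoutCreate(&dstLayout, CUDA_R_32F, rows, cols, rows); /* column-major default */

  cublasLtMatrixTransform(handle, tdesc, &alpha, src, srcLayout,
                          &beta, NULL, NULL, dst, dstLayout, stream);

  cublasLtMatrixLayoutDestroy(srcLayout);
  cublasLtMatrixLayoutDestroy(dstLayout);
  cublasLtMatrixTransformDescDestroy(tdesc);
}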
423
+ /* ---------------------------------------------------------------------------------------*/
424
+ /* Helper functions for cublasLtMatrixLayout_t */
425
+ /* ---------------------------------------------------------------------------------------*/
426
+
427
+ /** Enum for data ordering */
428
+ typedef enum {
429
+ /** Column-major
430
+ *
431
+ * Leading dimension is the stride (in elements) to the beginning of next column in memory.
432
+ */
433
+ CUBLASLT_ORDER_COL = 0,
434
+ /** Row major
435
+ *
436
+ * Leading dimension is the stride (in elements) to the beginning of next row in memory.
437
+ */
438
+ CUBLASLT_ORDER_ROW = 1,
439
+ /** Column-major ordered tiles of 32 columns.
440
+ *
441
+ * Leading dimension is the stride (in elements) to the beginning of next group of 32-columns. E.g. if matrix has 33
442
+ * columns and 2 rows, ld must be at least (32) * 2 = 64.
443
+ */
444
+ CUBLASLT_ORDER_COL32 = 2,
445
+ /** Column-major ordered tiles of composite tiles with total 32 columns and 8 rows, tile composed of interleaved
446
+ * inner tiles of 4 columns within 4 even or odd rows in an alternating pattern.
447
+ *
448
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 8 row tile for the next
449
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32 * 8) * 1 = 256.
450
+ */
451
+ CUBLASLT_ORDER_COL4_4R2_8C = 3,
452
+ * Column-major ordered tiles of composite tiles with total 32 columns and 32 rows.
453
+ * Element offset within the tile is calculated as (((row%8)/2*4+row/8)*2+row%2)*32+col.
454
+ *
455
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 32 row tile for the next
456
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32*32)*1 = 1024.
457
+ */
458
+ CUBLASLT_ORDER_COL32_2R_4R4 = 4,
459
+
460
+ } cublasLtOrder_t;
461
+
462
+ /** Attributes of memory layout */
463
+ typedef enum {
464
+ /** Data type, see cudaDataType.
465
+ *
466
+ * uint32_t
467
+ */
468
+ CUBLASLT_MATRIX_LAYOUT_TYPE = 0,
469
+
470
+ /** Memory order of the data, see cublasLtOrder_t.
471
+ *
472
+ * int32_t, default: CUBLASLT_ORDER_COL
473
+ */
474
+ CUBLASLT_MATRIX_LAYOUT_ORDER = 1,
475
+
476
+ /** Number of rows.
477
+ *
478
+ * Usually only values that can be expressed as int32_t are supported.
479
+ *
480
+ * uint64_t
481
+ */
482
+ CUBLASLT_MATRIX_LAYOUT_ROWS = 2,
483
+
484
+ /** Number of columns.
485
+ *
486
+ * Usually only values that can be expressed as int32_t are supported.
487
+ *
488
+ * uint64_t
489
+ */
490
+ CUBLASLT_MATRIX_LAYOUT_COLS = 3,
491
+
492
+ /** Matrix leading dimension.
493
+ *
494
+ * For CUBLASLT_ORDER_COL this is stride (in elements) of matrix column, for more details and documentation for
495
+ * other memory orders see documentation for cublasLtOrder_t values.
496
+ *
497
+ * Currently only non-negative values are supported, must be large enough so that matrix memory locations are not
498
+ * overlapping (e.g. greater or equal to CUBLASLT_MATRIX_LAYOUT_ROWS in case of CUBLASLT_ORDER_COL).
499
+ *
500
+ * int64_t;
501
+ */
502
+ CUBLASLT_MATRIX_LAYOUT_LD = 4,
503
+
504
+ /** Number of matmul operations to perform in the batch.
505
+ *
506
+ * See also CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT
507
+ *
508
+ * int32_t, default: 1
509
+ */
510
+ CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT = 5,
511
+
512
+ /** Stride (in elements) to the next matrix for strided batch operation.
513
+ *
514
+ * When matrix type is planar-complex (CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET != 0), batch stride
515
+ * is interpreted by cublasLtMatmul() in number of real valued sub-elements. E.g. for data of type CUDA_C_16F,
516
+ * offset of 1024B is encoded as a stride of value 512 (since each element of the real and imaginary matrices
517
+ * is a 2B (16bit) floating point type).
518
+ *
519
+ * NOTE: A bug in cublasLtMatrixTransform() causes it to interpret the batch stride for a planar-complex matrix
520
+ * as if it was specified in number of complex elements. Therefore an offset of 1024B must be encoded as stride
521
+ * value 256 when calling cublasLtMatrixTransform() (each complex element is 4B with real and imaginary values 2B
522
+ * each). This behavior is expected to be corrected in the next major cuBLAS version.
523
+ *
524
+ * int64_t, default: 0
525
+ */
526
+ CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET = 6,
527
+
528
+ /** Stride (in bytes) to the imaginary plane for planar complex layout.
529
+ *
530
+ * int64_t, default: 0 - 0 means that layout is regular (real and imaginary parts of complex numbers are interleaved
531
+ * in memory in each element)
532
+ */
533
+ CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET = 7,
534
+ } cublasLtMatrixLayoutAttribute_t;
535
+
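Caller-side sketch (added for illustration): describing a strided batch of 32 column-major m x n float matrices stored back to back by setting the batch attributes on a single layout descriptor. The batch count of 32 and the element stride are placeholder choices.

static void make_batched_layout_sketch(cublasLtMatrixLayout_t* layout, int64_t m, int64_t n) {
  const int32_t batchCount = 32;
  const int64_t strideElems = m * n; /* distance between consecutive matrices, in elements */

  cublasLtMatrixLayoutCreate(layout, CUDA_R_32F, m, n, m);
  cublasLtMatrixLayoutSetAttribute(*layout, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT,
                                   &batchCount, sizeof(batchCount));
  cublasLtMatrixLayoutSetAttribute(*layout, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET,
                                   &strideElems, sizeof(strideElems));
}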
536
+ /** Internal. Do not use directly.
537
+ */
538
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutInit_internal( //
539
+ cublasLtMatrixLayout_t matLayout,
540
+ size_t size,
541
+ cudaDataType type,
542
+ uint64_t rows,
543
+ uint64_t cols,
544
+ int64_t ld);
545
+
546
+ /** Initialize matrix layout descriptor in pre-allocated space.
547
+ *
548
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
549
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
550
+ */
551
+ static inline cublasStatus_t cublasLtMatrixLayoutInit(
552
+ cublasLtMatrixLayout_t matLayout, cudaDataType type, uint64_t rows, uint64_t cols, int64_t ld) {
553
+ return cublasLtMatrixLayoutInit_internal(matLayout, sizeof(*matLayout), type, rows, cols, ld);
554
+ }
555
+
556
+ /** Create new matrix layout descriptor.
557
+ *
558
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
559
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
560
+ */
561
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutCreate( //
562
+ cublasLtMatrixLayout_t* matLayout,
563
+ cudaDataType type,
564
+ uint64_t rows,
565
+ uint64_t cols,
566
+ int64_t ld);
567
+
568
+ /** Destroy matrix layout descriptor.
569
+ *
570
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
571
+ */
572
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutDestroy(cublasLtMatrixLayout_t matLayout);
573
+
574
+ /** Set matrix layout descriptor attribute.
575
+ *
576
+ * \param[in] matLayout The descriptor
577
+ * \param[in] attr The attribute
578
+ * \param[in] buf memory address containing the new value
579
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
580
+ *
581
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
582
+ * selected attribute
583
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
584
+ */
585
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutSetAttribute( //
586
+ cublasLtMatrixLayout_t matLayout,
587
+ cublasLtMatrixLayoutAttribute_t attr,
588
+ const void* buf,
589
+ size_t sizeInBytes);
590
+
591
+ /** Get matrix layout descriptor attribute.
592
+ *
593
+ * \param[in] matLayout The descriptor
594
+ * \param[in] attr The attribute
595
+ * \param[out] buf memory address where the attribute value will be written
596
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
597
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
598
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
599
+ *
600
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
601
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
602
+ * selected attribute
603
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
604
+ */
605
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutGetAttribute( //
606
+ cublasLtMatrixLayout_t matLayout,
607
+ cublasLtMatrixLayoutAttribute_t attr,
608
+ void* buf,
609
+ size_t sizeInBytes,
610
+ size_t* sizeWritten);
611
+
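Illustrative sketch of the two-step query pattern described above (not part of the original header): first call with sizeInBytes == 0 to learn how many bytes the attribute needs, then read it into a correctly sized buffer. Shown here for the memory order of a layout.

static int32_t query_layout_order_sketch(cublasLtMatrixLayout_t layout) {
  size_t needed = 0;
  int32_t order = CUBLASLT_ORDER_COL;
  cublasLtMatrixLayoutGetAttribute(layout, CUBLASLT_MATRIX_LAYOUT_ORDER, NULL, 0, &needed);
  if (needed == sizeof(order))
    cublasLtMatrixLayoutGetAttribute(layout, CUBLASLT_MATRIX_LAYOUT_ORDER, &order, sizeof(order), &needed);
  return order;
}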
612
+ /* ---------------------------------------------------------------------------------------*/
613
+ /* Helper functions for cublasLtMatmulDesc_t */
614
+ /* ---------------------------------------------------------------------------------------*/
615
+
616
+ /** Matmul descriptor attributes to define details of the operation. */
617
+ typedef enum {
618
+ /** Compute type, see cudaDataType. Defines data type used for multiply and accumulate operations and the
619
+ * accumulator during matrix multiplication.
620
+ *
621
+ * int32_t
622
+ */
623
+ CUBLASLT_MATMUL_DESC_COMPUTE_TYPE = 0,
624
+
625
+ /** Scale type, see cudaDataType. Defines data type of alpha and beta. Accumulator and value from matrix C are
626
+ * typically converted to scale type before final scaling. Value is then converted from scale type to type of matrix
627
+ * D before being stored in memory.
628
+ *
629
+ * int32_t, default: same as CUBLASLT_MATMUL_DESC_COMPUTE_TYPE
630
+ */
631
+ CUBLASLT_MATMUL_DESC_SCALE_TYPE = 1,
632
+
633
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t. When CUBLASLT_POINTER_MODE_DEVICE_VECTOR is in use,
634
+ * alpha/beta vector lengths must match the number of output matrix rows.
635
+ *
636
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
637
+ */
638
+ CUBLASLT_MATMUL_DESC_POINTER_MODE = 2,
639
+
640
+ /** Transform of matrix A, see cublasOperation_t.
641
+ *
642
+ * int32_t, default: CUBLAS_OP_N
643
+ */
644
+ CUBLASLT_MATMUL_DESC_TRANSA = 3,
645
+
646
+ /** Transform of matrix B, see cublasOperation_t.
647
+ *
648
+ * int32_t, default: CUBLAS_OP_N
649
+ */
650
+ CUBLASLT_MATMUL_DESC_TRANSB = 4,
651
+
652
+ /** Transform of matrix C, see cublasOperation_t.
653
+ *
654
+ * Currently only CUBLAS_OP_N is supported.
655
+ *
656
+ * int32_t, default: CUBLAS_OP_N
657
+ */
658
+ CUBLASLT_MATMUL_DESC_TRANSC = 5,
659
+
660
+ /** Matrix fill mode, see cublasFillMode_t.
661
+ *
662
+ * int32_t, default: CUBLAS_FILL_MODE_FULL
663
+ */
664
+ CUBLASLT_MATMUL_DESC_FILL_MODE = 6,
665
+
666
+ /** Epilogue function, see cublasLtEpilogue_t.
667
+ *
668
+ * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT
669
+ */
670
+ CUBLASLT_MATMUL_DESC_EPILOGUE = 7,
671
+
672
+ /** Bias or bias gradient vector pointer in the device memory.
673
+ *
674
+ * Bias case. See CUBLASLT_EPILOGUE_BIAS.
675
+ * For bias data type see CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE.
676
+ *
677
+ * Bias vector length must match matrix D rows count.
678
+ *
679
+ * Bias gradient case. See CUBLASLT_EPILOGUE_DRELU_BGRAD and CUBLASLT_EPILOGUE_DGELU_BGRAD.
680
+ * Bias gradient vector elements are the same type as the output elements
681
+ * (Ctype) with the exception of IMMA kernels (see above).
682
+ *
683
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic(),
684
+ * depend on its value to determine expected pointer alignment.
685
+ *
686
+ * Bias case: const void *, default: NULL
687
+ * Bias gradient case: void *, default: NULL
688
+ */
689
+ CUBLASLT_MATMUL_DESC_BIAS_POINTER = 8,
690
+
691
+ /** Batch stride for bias or bias gradient vector.
692
+ *
693
+ * Used together with CUBLASLT_MATMUL_DESC_BIAS_POINTER when matrix D's CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1.
694
+ *
695
+ * int64_t, default: 0
696
+ */
697
+ CUBLASLT_MATMUL_DESC_BIAS_BATCH_STRIDE = 10,
698
+
699
+ /** Pointer for epilogue auxiliary buffer.
700
+ *
701
+ * - Output vector for ReLu bit-mask in forward pass when CUBLASLT_EPILOGUE_RELU_AUX
702
+ * or CUBLASLT_EPILOGUE_RELU_AUX_BIAS epilogue is used.
703
+ * - Input vector for ReLu bit-mask in backward pass when
704
+ * CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is used.
705
+ *
706
+ * - Output of GELU input matrix in forward pass when
707
+ * CUBLASLT_EPILOGUE_GELU_AUX_BIAS epilogue is used.
708
+ * - Input of GELU input matrix for backward pass when
709
+ * CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
710
+ *
711
+ * For aux data type see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE.
712
+ *
713
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic(),
714
+ * depend on its value to determine expected pointer alignment.
715
+ *
716
+ * Requires setting CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD attribute.
717
+ *
718
+ * Forward pass: void *, default: NULL
719
+ * Backward pass: const void *, default: NULL
720
+ */
721
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER = 11,
722
+
723
+ /** Leading dimension for epilogue auxiliary buffer.
724
+ *
725
+ * - ReLu bit-mask matrix leading dimension in elements (i.e. bits)
726
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
727
+ * used. Must be divisible by 128 and be no less than the number of rows in the output matrix.
728
+ *
729
+ * - GELU input matrix leading dimension in elements
730
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
731
+ * Must be divisible by 8 and be no less than the number of rows in the output matrix.
732
+ *
733
+ * int64_t, default: 0
734
+ */
735
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD = 12,
736
+
737
+ /** Batch stride for epilogue auxiliary buffer.
738
+ *
739
+ * - ReLu bit-mask matrix batch stride in elements (i.e. bits)
740
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
741
+ * used. Must be divisible by 128.
742
+ *
743
+ * - GELU input matrix batch stride in elements
744
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
745
+ * Must be divisible by 8.
746
+ *
747
+ * int64_t, default: 0
748
+ */
749
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_BATCH_STRIDE = 13,
750
+
751
+ /** Batch stride for alpha vector.
752
+ *
753
+ * Used together with CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST when matrix D's
754
+ * CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. If CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO is set then
755
+ * CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE must be set to 0, as this mode does not support a batched alpha vector.
756
+ *
757
+ * int64_t, default: 0
758
+ */
759
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE = 14,
760
+
761
+ /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs
762
+ * when user expects a concurrent stream to be using some of the device resources.
763
+ *
764
+ * int32_t, default: 0 - use the number reported by the device.
765
+ */
766
+ CUBLASLT_MATMUL_DESC_SM_COUNT_TARGET = 15,
767
+
768
+ /** Device pointer to the scale factor value that converts data in matrix A to the compute data type range.
769
+ *
770
+ * The scaling factor value must have the same type as the compute type.
771
+ *
772
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
773
+ *
774
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
775
+ * will return CUBLAS_STATUS_INVALID_VALUE.
776
+ *
777
+ * const void *, default: NULL
778
+ */
779
+ CUBLASLT_MATMUL_DESC_A_SCALE_POINTER = 17,
780
+
781
+ /** Device pointer to the scale factor value to convert data in matrix B to compute data type range.
782
+ *
783
+ * The scaling factor value must have the same type as the compute type.
784
+ *
785
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
786
+ *
787
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
788
+ * will return CUBLAS_STATUS_INVALID_VALUE.
789
+ *
790
+ * const void *, default: NULL
791
+ */
792
+ CUBLASLT_MATMUL_DESC_B_SCALE_POINTER = 18,
793
+
794
+ /** Device pointer to the scale factor value to convert data in matrix C to compute data type range.
795
+ *
796
+ * The scaling factor value must have the same type as the compute type.
797
+ *
798
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
799
+ *
800
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
801
+ * will return CUBLAS_STATUS_INVALID_VALUE.
802
+ *
803
+ * const void *, default: NULL
804
+ */
805
+ CUBLASLT_MATMUL_DESC_C_SCALE_POINTER = 19,
806
+
807
+ /** Device pointer to the scale factor value to convert data in matrix D to compute data type range.
808
+ *
809
+ * The scaling factor value must have the same type as the compute type.
810
+ *
811
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
812
+ *
813
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
814
+ * will return CUBLAS_STATUS_INVALID_VALUE.
815
+ *
816
+ * const void *, default: NULL
817
+ */
818
+ CUBLASLT_MATMUL_DESC_D_SCALE_POINTER = 20,
819
+
820
+ /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
821
+ * output matrix.
822
+ *
823
+ * The computed value has the same type as the compute type.
824
+ *
825
+ * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
826
+ * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
827
+ *
828
+ * void *, default: NULL
829
+ */
830
+ CUBLASLT_MATMUL_DESC_AMAX_D_POINTER = 21,
831
+
832
+ /** Type of the data to be stored to the memory pointed to by CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
833
+ *
834
+ * If unset, the data type defaults to the type of elements of the output matrix with some exceptions, see details
835
+ * below.
836
+ *
837
+ * ReLu uses a bit-mask.
838
+ *
839
+ * GELU input matrix elements type is the same as the type of elements of
840
+ * the output matrix with some exceptions, see details below.
841
+ *
842
+ * For fp8 kernels with output type CUDA_R_8F_E4M3 the aux data type can be CUDA_R_8F_E4M3 or CUDA_R_16F with some
843
+ * restrictions. See https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmulDescAttributes_t for more details.
844
+ *
845
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
846
+ * will return CUBLAS_STATUS_INVALID_VALUE.
847
+ *
848
+ * int32_t based on cudaDataType, default: -1
849
+ */
850
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE = 22,
851
+
852
+ /** Device pointer to the scaling factor value to convert results from compute type data range to storage
853
+ * data range in the auxiliary matrix that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
854
+ *
855
+ * The scaling factor value must have the same type as the compute type.
856
+ *
857
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1. If set for an unsupported matrix data,
858
+ * scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
859
+ *
860
+ * void *, default: NULL
861
+ */
862
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_SCALE_POINTER = 23,
863
+
864
+ /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
865
+ * buffer that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
866
+ *
867
+ * The computed value has the same type as the compute type.
868
+ *
869
+ * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
870
+ * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
871
+ *
872
+ * void *, default: NULL
873
+ */
874
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_AMAX_POINTER = 24,
875
+
876
+ /** Flag for managing fp8 fast accumulation mode.
877
+ * When enabled, problem execution might be faster but at the cost of lower accuracy because intermediate results
878
+ * will not periodically be promoted to a higher precision.
879
+ *
880
+ * int8_t, default: 0 - fast accumulation mode is disabled.
881
+ */
882
+ CUBLASLT_MATMUL_DESC_FAST_ACCUM = 25,
883
+
884
+ /** Type of bias or bias gradient vector in the device memory.
885
+ *
886
+ * Bias case: see CUBLASLT_EPILOGUE_BIAS.
887
+ *
888
+ * Bias vector elements are the same type as the elements of output matrix (Dtype) with the following exceptions:
889
+ * - IMMA kernels with computeType=CUDA_R_32I and Ctype=CUDA_R_8I where the bias vector elements
890
+ * are the same type as alpha, beta (CUBLASLT_MATMUL_DESC_SCALE_TYPE=CUDA_R_32F)
891
+ * - fp8 kernels with an output type of CUDA_R_32F, CUDA_R_8F_E4M3 or CUDA_R_8F_E5M2, See
892
+ * https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmul for details.
893
+ *
894
+ * int32_t based on cudaDataType, default: -1
895
+ */
896
+ CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE = 26,
897
+ } cublasLtMatmulDescAttributes_t;
898
+
899
+ /** Internal. Do not use directly.
900
+ */
901
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescInit_internal( //
902
+ cublasLtMatmulDesc_t matmulDesc,
903
+ size_t size,
904
+ cublasComputeType_t computeType,
905
+ cudaDataType_t scaleType);
906
+
907
+ /** Initialize matmul operation descriptor in pre-allocated space.
908
+ *
909
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
910
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was initialized successfully
911
+ */
912
+ static inline cublasStatus_t cublasLtMatmulDescInit( //
913
+ cublasLtMatmulDesc_t matmulDesc,
914
+ cublasComputeType_t computeType,
915
+ cudaDataType_t scaleType) {
916
+ return cublasLtMatmulDescInit_internal(matmulDesc, sizeof(*matmulDesc), computeType, scaleType);
917
+ }
918
+
919
+ /** Create new matmul operation descriptor.
920
+ *
921
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
922
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
923
+ */
924
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescCreate(cublasLtMatmulDesc_t* matmulDesc,
925
+ cublasComputeType_t computeType,
926
+ cudaDataType_t scaleType);
927
+
928
+ /** Destroy matmul operation descriptor.
929
+ *
930
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
931
+ */
932
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescDestroy(cublasLtMatmulDesc_t matmulDesc);
933
+
934
+ /** Set matmul operation descriptor attribute.
935
+ *
936
+ * \param[in] matmulDesc The descriptor
937
+ * \param[in] attr The attribute
938
+ * \param[in] buf memory address containing the new value
939
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
940
+ *
941
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
942
+ * selected attribute
943
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
944
+ */
945
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescSetAttribute( //
946
+ cublasLtMatmulDesc_t matmulDesc,
947
+ cublasLtMatmulDescAttributes_t attr,
948
+ const void* buf,
949
+ size_t sizeInBytes);
950
+
951
+ /** Get matmul operation descriptor attribute.
952
+ *
953
+ * \param[in] matmulDesc The descriptor
954
+ * \param[in] attr The attribute
955
+ * \param[out] buf memory address where the attribute value will be written
956
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
957
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
958
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
959
+ *
960
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
961
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
962
+ * selected attribute
963
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
964
+ */
965
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescGetAttribute( //
966
+ cublasLtMatmulDesc_t matmulDesc,
967
+ cublasLtMatmulDescAttributes_t attr,
968
+ void* buf,
969
+ size_t sizeInBytes,
970
+ size_t* sizeWritten);
971
+
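Caller-side sketch (added for illustration): a descriptor for D = alpha * A^T * B + beta * C with FP32 compute and FP32 alpha/beta. Only TRANSA needs to be set explicitly since TRANSB defaults to CUBLAS_OP_N; error handling is omitted.

static cublasLtMatmulDesc_t make_tn_desc_sketch(void) {
  cublasLtMatmulDesc_t op = NULL;
  const cublasOperation_t transA = CUBLAS_OP_T;

  cublasLtMatmulDescCreate(&op, CUBLAS_COMPUTE_32F, CUDA_R_32F);
  cublasLtMatmulDescSetAttribute(op, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA));
  return op;
}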
972
+ /* ---------------------------------------------------------------------------------------*/
973
+ /* Helper functions for cublasLtMatrixTransformDesc_t */
974
+ /* ---------------------------------------------------------------------------------------*/
975
+
976
+ /** Matrix transform descriptor attributes to define details of the operation.
977
+ */
978
+ typedef enum {
979
+ /** Scale type, see cudaDataType. Inputs are converted to scale type for scaling and summation and results are then
980
+ * converted to output type to store in memory.
981
+ *
982
+ * int32_t
983
+ */
984
+ CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE,
985
+
986
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t.
987
+ *
988
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
989
+ */
990
+ CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE,
991
+
992
+ /** Transform of matrix A, see cublasOperation_t.
993
+ *
994
+ * int32_t, default: CUBLAS_OP_N
995
+ */
996
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA,
997
+
998
+ /** Transform of matrix B, see cublasOperation_t.
999
+ *
1000
+ * int32_t, default: CUBLAS_OP_N
1001
+ */
1002
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSB,
1003
+ } cublasLtMatrixTransformDescAttributes_t;
1004
+
1005
+ /** Internal. Do not use directly.
1006
+ */
1007
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescInit_internal(cublasLtMatrixTransformDesc_t transformDesc,
1008
+ size_t size,
1009
+ cudaDataType scaleType);
1010
+
1011
+ /** Initialize matrix transform operation descriptor in pre-allocated space.
1012
+ *
1013
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1014
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1015
+ */
1016
+ static inline cublasStatus_t cublasLtMatrixTransformDescInit(cublasLtMatrixTransformDesc_t transformDesc,
1017
+ cudaDataType scaleType) {
1018
+ return cublasLtMatrixTransformDescInit_internal(transformDesc, sizeof(*transformDesc), scaleType);
1019
+ }
1020
+
1021
+ /** Create new matrix transform operation descriptor.
1022
+ *
1023
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
1024
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1025
+ */
1026
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescCreate(cublasLtMatrixTransformDesc_t* transformDesc,
1027
+ cudaDataType scaleType);
1028
+
1029
+ /** Destroy matrix transform operation descriptor.
1030
+ *
1031
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1032
+ */
1033
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescDestroy(cublasLtMatrixTransformDesc_t transformDesc);
1034
+
1035
+ /** Set matrix transform operation descriptor attribute.
1036
+ *
1037
+ * \param[in] transformDesc The descriptor
1038
+ * \param[in] attr The attribute
1039
+ * \param[in] buf memory address containing the new value
1040
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1041
+ *
1042
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1043
+ * selected attribute
1044
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1045
+ */
1046
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescSetAttribute( //
1047
+ cublasLtMatrixTransformDesc_t transformDesc,
1048
+ cublasLtMatrixTransformDescAttributes_t attr,
1049
+ const void* buf,
1050
+ size_t sizeInBytes);
1051
+
1052
+ /** Get matrix transform operation descriptor attribute.
1053
+ *
1054
+ * \param[in] transformDesc The descriptor
1055
+ * \param[in] attr The attribute
1056
+ * \param[out] buf memory address where the attribute value will be written
1057
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1058
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number
1059
+ * of bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1060
+ *
1061
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1062
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1063
+ * selected attribute
1064
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1065
+ */
1066
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescGetAttribute( //
1067
+ cublasLtMatrixTransformDesc_t transformDesc,
1068
+ cublasLtMatrixTransformDescAttributes_t attr,
1069
+ void* buf,
1070
+ size_t sizeInBytes,
1071
+ size_t* sizeWritten);
1072
+
1073
+ /** Reduction scheme for portions of the dot-product calculated in parallel (a. k. a. "split - K").
1074
+ */
1075
+ typedef enum {
1076
+ /** No reduction scheme, dot-product shall be performed in one sequence.
1077
+ */
1078
+ CUBLASLT_REDUCTION_SCHEME_NONE = 0,
1079
+
1080
+ /** Reduction is performed "in place" - using the output buffer (and output data type) and counters (in workspace) to
1081
+ * guarantee the sequentiality.
1082
+ */
1083
+ CUBLASLT_REDUCTION_SCHEME_INPLACE = 1,
1084
+
1085
+ /** Intermediate results are stored in compute type in the workspace and reduced in a separate step.
1086
+ */
1087
+ CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE = 2,
1088
+
1089
+ /** Intermediate results are stored in output type in the workspace and reduced in a separate step.
1090
+ */
1091
+ CUBLASLT_REDUCTION_SCHEME_OUTPUT_TYPE = 4,
1092
+
1093
+ CUBLASLT_REDUCTION_SCHEME_MASK = 0x7,
1094
+ } cublasLtReductionScheme_t;
1095
+
1096
+ /** Postprocessing options for the epilogue
1097
+ */
1098
+ typedef enum {
1099
+ /** No special postprocessing, just scale and quantize results if necessary.
1100
+ */
1101
+ CUBLASLT_EPILOGUE_DEFAULT = 1,
1102
+
1103
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
1104
+ */
1105
+ CUBLASLT_EPILOGUE_RELU = 2,
1106
+
1107
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
1108
+ *
1109
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
1110
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1111
+ */
1112
+ CUBLASLT_EPILOGUE_RELU_AUX = (CUBLASLT_EPILOGUE_RELU | 128),
1113
+
1114
+ /** Bias, apply (broadcasted) Bias from bias vector. Bias vector length must match matrix D rows, it must be packed
1115
+ * (stride between vector elements is 1). Bias vector is broadcasted to all columns and added before applying final
1116
+ * postprocessing.
1117
+ */
1118
+ CUBLASLT_EPILOGUE_BIAS = 4,
1119
+
1120
+ /** ReLu and Bias, apply Bias and then ReLu transform
1121
+ */
1122
+ CUBLASLT_EPILOGUE_RELU_BIAS = (CUBLASLT_EPILOGUE_RELU | CUBLASLT_EPILOGUE_BIAS),
1123
+
1124
+ /** ReLu and Bias, apply Bias and then ReLu transform
1125
+ *
1126
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
1127
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1128
+ */
1129
+ CUBLASLT_EPILOGUE_RELU_AUX_BIAS = (CUBLASLT_EPILOGUE_RELU_AUX | CUBLASLT_EPILOGUE_BIAS),
1130
+
1131
+ /* ReLu gradient. Apply ReLu gradient to matmul output. Store ReLu gradient in the output matrix.
1132
+ *
1133
+ * This epilogue mode requires an extra input,
1134
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1135
+ */
1136
+ CUBLASLT_EPILOGUE_DRELU = 8 | 128,
1137
+
1138
+ /* ReLu and Bias gradients. Apply independently ReLu and Bias gradient to
1139
+ * matmul output. Store ReLu gradient in the output matrix, and Bias gradient
1140
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1141
+ *
1142
+ * This epilogue mode requires an extra input,
1143
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1144
+ */
1145
+ CUBLASLT_EPILOGUE_DRELU_BGRAD = CUBLASLT_EPILOGUE_DRELU | 16,
1146
+
1147
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
1148
+ */
1149
+ CUBLASLT_EPILOGUE_GELU = 32,
1150
+
1151
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
1152
+ *
1153
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
1154
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1155
+ */
1156
+ CUBLASLT_EPILOGUE_GELU_AUX = (CUBLASLT_EPILOGUE_GELU | 128),
1157
+
1158
+ /** GELU and Bias, apply Bias and then GELU transform
1159
+ */
1160
+ CUBLASLT_EPILOGUE_GELU_BIAS = (CUBLASLT_EPILOGUE_GELU | CUBLASLT_EPILOGUE_BIAS),
1161
+
1162
+ /** GELU and Bias, apply Bias and then GELU transform
1163
+ *
1164
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
1165
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1166
+ */
1167
+ CUBLASLT_EPILOGUE_GELU_AUX_BIAS = (CUBLASLT_EPILOGUE_GELU_AUX | CUBLASLT_EPILOGUE_BIAS),
1168
+
1169
+ /* GELU gradient. Apply GELU gradient to matmul output. Store GELU gradient in the output matrix.
1170
+ *
1171
+ * This epilogue mode requires an extra input,
1172
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1173
+ */
1174
+ CUBLASLT_EPILOGUE_DGELU = 64 | 128,
1175
+
1176
+ /* GELU and Bias gradients. Apply independently GELU and Bias gradient to
1177
+ * matmul output. Store GELU gradient in the output matrix, and Bias gradient
1178
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1179
+ *
1180
+ * This epilogue mode requires an extra input,
1181
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1182
+ */
1183
+ CUBLASLT_EPILOGUE_DGELU_BGRAD = CUBLASLT_EPILOGUE_DGELU | 16,
1184
+
1185
+ /** Bias gradient based on the input matrix A.
1186
+ *
1187
+ * The bias size corresponds to the number of rows of the matrix D.
1188
+ * The reduction happens over the GEMM's "k" dimension.
1189
+ *
1190
+ * Stores Bias gradient in the auxiliary output
1191
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1192
+ */
1193
+ CUBLASLT_EPILOGUE_BGRADA = 256,
1194
+
1195
+ /** Bias gradient based on the input matrix B.
1196
+ *
1197
+ * The bias size corresponds to the number of columns of the matrix D.
1198
+ * The reduction happens over the GEMM's "k" dimension.
1199
+ *
1200
+ * Stores Bias gradient in the auxiliary output
1201
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1202
+ */
1203
+ CUBLASLT_EPILOGUE_BGRADB = 512,
1204
+ } cublasLtEpilogue_t;
1205
+
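Illustrative sketch (not part of the original header): enabling the fused bias + GELU epilogue on an existing matmul descriptor for a forward training pass. The bias and geluInput device buffers and the auxiliary leading dimension ldAux (a multiple of 8, no smaller than the number of rows of D) are placeholders.

static void enable_gelu_aux_bias_sketch(cublasLtMatmulDesc_t op, const void* bias,
                                        void* geluInput, int64_t ldAux) {
  const cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_GELU_AUX_BIAS;

  cublasLtMatmulDescSetAttribute(op, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi));
  cublasLtMatmulDescSetAttribute(op, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias));
  cublasLtMatmulDescSetAttribute(op, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
                                 &geluInput, sizeof(geluInput));
  cublasLtMatmulDescSetAttribute(op, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &ldAux, sizeof(ldAux));
}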
1206
+ /** Matmul heuristic search mode
1207
+ */
1208
+ typedef enum {
1209
+ /** ask heuristics for the best algo for the given use case
1210
+ */
1211
+ CUBLASLT_SEARCH_BEST_FIT = 0,
1212
+ /** only try to find best config for preconfigured algo id
1213
+ */
1214
+ CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID = 1,
1215
+ /** reserved for future use
1216
+ */
1217
+ CUBLASLT_SEARCH_RESERVED_02 = 2,
1218
+ /** reserved for future use
1219
+ */
1220
+ CUBLASLT_SEARCH_RESERVED_03 = 3,
1221
+ /** reserved for future use
1222
+ */
1223
+ CUBLASLT_SEARCH_RESERVED_04 = 4,
1224
+ /** reserved for future use
1225
+ */
1226
+ CUBLASLT_SEARCH_RESERVED_05 = 5,
1227
+ } cublasLtMatmulSearch_t;
1228
+
1229
+ /** Algo search preference to fine tune the heuristic function. */
1230
+ typedef enum {
1231
+ /** Search mode, see cublasLtMatmulSearch_t.
1232
+ *
1233
+ * uint32_t, default: CUBLASLT_SEARCH_BEST_FIT
1234
+ */
1235
+ CUBLASLT_MATMUL_PREF_SEARCH_MODE = 0,
1236
+
1237
+ /** Maximum allowed workspace size in bytes.
1238
+ *
1239
+ * uint64_t, default: 0 - no workspace allowed
1240
+ */
1241
+ CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES = 1,
1242
+
1243
+ /** Reduction scheme mask, see cublasLtReductionScheme_t. Filters heuristic result to only include algo configs that
1244
+ * use one of the required modes.
1245
+ *
1246
+ * E.g. mask value of 0x03 will allow only INPLACE and COMPUTE_TYPE reduction schemes.
1247
+ *
1248
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_MASK (allows all reduction schemes)
1249
+ */
1250
+ CUBLASLT_MATMUL_PREF_REDUCTION_SCHEME_MASK = 3,
1251
+
1252
+ /** Minimum buffer alignment for matrix A (in bytes).
1253
+ *
1254
+ * Selecting a smaller value will exclude algorithms that can not work with matrix A that is not as strictly aligned
1255
+ * as they need.
1256
+ *
1257
+ * uint32_t, default: 256
1258
+ */
1259
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES = 5,
1260
+
1261
+ /** Minimum buffer alignment for matrix B (in bytes).
1262
+ *
1263
+ * Selecting a smaller value will exclude algorithms that can not work with matrix B that is not as strictly aligned
1264
+ * as they need.
1265
+ *
1266
+ * uint32_t, default: 256
1267
+ */
1268
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES = 6,
1269
+
1270
+ /** Minimum buffer alignment for matrix C (in bytes).
1271
+ *
1272
+ * Selecting a smaller value will exclude algorithms that can not work with matrix C that is not as strictly aligned
1273
+ * as they need.
1274
+ *
1275
+ * uint32_t, default: 256
1276
+ */
1277
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES = 7,
1278
+
1279
+ /** Minimum buffer alignment for matrix D (in bytes).
1280
+ *
1281
+ * Selecting a smaller value will exclude algorithms that can not work with matrix D that is not as strictly aligned
1282
+ * as they need.
1283
+ *
1284
+ * uint32_t, default: 256
1285
+ */
1286
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_D_BYTES = 8,
1287
+
1288
+ /** Maximum wave count.
1289
+ *
1290
+ * See cublasLtMatmulHeuristicResult_t::wavesCount.
1291
+ *
1292
+ * Selecting a non-zero value will exclude algorithms that report device utilization higher than specified.
1293
+ *
1294
+ * float, default: 0.0f
1295
+ */
1296
+ CUBLASLT_MATMUL_PREF_MAX_WAVES_COUNT = 9,
1297
+
1298
+ /** Numerical implementation details mask, see cublasLtNumericalImplFlags_t. Filters heuristic result to only include
1299
+ * algorithms that use the allowed implementations.
1300
+ *
1301
+ * uint64_t, default: uint64_t(-1) (allow everything)
1302
+ */
1303
+ CUBLASLT_MATMUL_PREF_IMPL_MASK = 12,
1304
+ } cublasLtMatmulPreferenceAttributes_t;
1305
+
1306
+ /** Internal. Do not use directly.
1307
+ */
1308
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceInit_internal(cublasLtMatmulPreference_t pref, size_t size);
1309
+
1310
+ /** Initialize matmul heuristic search preference descriptor in pre-allocated space.
1311
+ *
1312
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1313
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1314
+ */
1315
+ static inline cublasStatus_t cublasLtMatmulPreferenceInit(cublasLtMatmulPreference_t pref) {
1316
+ return cublasLtMatmulPreferenceInit_internal(pref, sizeof(*pref));
1317
+ }
1318
+
1319
+ /** Create new matmul heuristic search preference descriptor.
1320
+ *
1321
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
1322
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1323
+ */
1324
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceCreate(cublasLtMatmulPreference_t* pref);
1325
+
1326
+ /** Destroy matmul heuristic search preference descriptor.
1327
+ *
1328
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1329
+ */
1330
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceDestroy(cublasLtMatmulPreference_t pref);
1331
+
1332
+ /** Set matmul heuristic search preference descriptor attribute.
1333
+ *
1334
+ * \param[in] pref The descriptor
1335
+ * \param[in] attr The attribute
1336
+ * \param[in] buf memory address containing the new value
1337
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1338
+ *
1339
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1340
+ * selected attribute
1341
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1342
+ */
1343
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceSetAttribute( //
1344
+ cublasLtMatmulPreference_t pref,
1345
+ cublasLtMatmulPreferenceAttributes_t attr,
1346
+ const void* buf,
1347
+ size_t sizeInBytes);
1348
+
1349
+ /** Get matmul heuristic search preference descriptor attribute.
1350
+ *
1351
+ * \param[in] pref The descriptor
1352
+ * \param[in] attr The attribute
1353
+ * \param[out] buf memory address where the attribute value will be written
1354
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1355
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1356
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1357
+ *
1358
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1359
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1360
+ * selected attribute
1361
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1362
+ */
1363
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceGetAttribute( //
1364
+ cublasLtMatmulPreference_t pref,
1365
+ cublasLtMatmulPreferenceAttributes_t attr,
1366
+ void* buf,
1367
+ size_t sizeInBytes,
1368
+ size_t* sizeWritten);
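Caller-side sketch (added for illustration): a preference object that caps the workspace at 32 MiB and declares that the A buffer is only guaranteed to be 16-byte aligned; both numbers are placeholder choices.

static cublasLtMatmulPreference_t make_pref_sketch(void) {
  cublasLtMatmulPreference_t pref = NULL;
  const uint64_t maxWorkspace = 32ull * 1024 * 1024;
  const uint32_t alignA = 16;

  cublasLtMatmulPreferenceCreate(&pref);
  cublasLtMatmulPreferenceSetAttribute(pref, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
                                       &maxWorkspace, sizeof(maxWorkspace));
  cublasLtMatmulPreferenceSetAttribute(pref, CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES,
                                       &alignA, sizeof(alignA));
  return pref;
}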
1369
+
1370
+ /** Results structure used by cublasLtMatmulAlgoGetHeuristic() and cublasLtMatmulAlgoCheck().
1371
+ *
1372
+ * Holds returned configured algo descriptor and its runtime properties.
1373
+ */
1374
+ typedef struct {
1375
+ /** Matmul algorithm descriptor.
1376
+ *
1377
+ * Must be initialized with cublasLtMatmulAlgoInit() if the preference's CUBLASLT_MATMUL_PREF_SEARCH_MODE is set to
1378
+ * CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID
1379
+ */
1380
+ cublasLtMatmulAlgo_t algo;
1381
+
1382
+ /** Actual size of workspace memory required.
1383
+ */
1384
+ size_t workspaceSize;
1385
+
1386
+ /** Result status. Other fields are only valid if, after a call to cublasLtMatmulAlgoGetHeuristic(), this member is set to
1387
+ * CUBLAS_STATUS_SUCCESS.
1388
+ */
1389
+ cublasStatus_t state;
1390
+
1391
+ /** Waves count - a device utilization metric.
1392
+ *
1393
+ * wavesCount value of 1.0f suggests that when kernel is launched it will fully occupy the GPU.
1394
+ */
1395
+ float wavesCount;
1396
+
1397
+ int reserved[4];
1398
+ } cublasLtMatmulHeuristicResult_t;
1399
+
1400
+ /** Query cublasLt heuristic for algorithm appropriate for given use case.
1401
+ *
1402
+ * \param[in] lightHandle Pointer to the allocated cuBLASLt handle for the cuBLASLt
1403
+ * context. See cublasLtHandle_t.
1404
+ * \param[in] operationDesc Handle to the matrix multiplication descriptor.
1405
+ * \param[in] Adesc Handle to the layout descriptors for matrix A.
1406
+ * \param[in] Bdesc Handle to the layout descriptors for matrix B.
1407
+ * \param[in] Cdesc Handle to the layout descriptors for matrix C.
1408
+ * \param[in] Ddesc Handle to the layout descriptors for matrix D.
1409
+ * \param[in] preference Pointer to the structure holding the heuristic search
1410
+ * preferences descriptor. See cublasLtMatmulPreference_t.
1411
+ * \param[in] requestedAlgoCount Size of heuristicResultsArray (in elements) and requested
1412
+ * maximum number of algorithms to return.
1413
+ * \param[in, out] heuristicResultsArray Output algorithms and associated runtime characteristics,
1414
+ * ordered in increasing estimated compute time.
1415
+ * \param[out] returnAlgoCount The number of heuristicResultsArray elements written.
1416
+ *
1417
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
1418
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if no heuristic function available for current configuration
1419
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect
1420
+ * heuristicResultsArray[0 to (returnAlgoCount - 1)].state
1421
+ * for detail status of results
1422
+ */
1423
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetHeuristic(cublasLtHandle_t lightHandle,
1424
+ cublasLtMatmulDesc_t operationDesc,
1425
+ cublasLtMatrixLayout_t Adesc,
1426
+ cublasLtMatrixLayout_t Bdesc,
1427
+ cublasLtMatrixLayout_t Cdesc,
1428
+ cublasLtMatrixLayout_t Ddesc,
1429
+ cublasLtMatmulPreference_t preference,
1430
+ int requestedAlgoCount,
1431
+ cublasLtMatmulHeuristicResult_t heuristicResultsArray[],
1432
+ int* returnAlgoCount);
1433
+
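Illustrative sketch (not part of the original header): query the heuristic for up to 8 candidates and run the matmul with the first one; results are already ordered by estimated compute time. Descriptors and the preference object are assumed to have been created as in the earlier sketches.

static cublasStatus_t run_with_heuristic_sketch(cublasLtHandle_t handle, cublasLtMatmulDesc_t op,
                                                cublasLtMatrixLayout_t Adesc, cublasLtMatrixLayout_t Bdesc,
                                                cublasLtMatrixLayout_t Cdesc, cublasLtMatrixLayout_t Ddesc,
                                                cublasLtMatmulPreference_t pref,
                                                const void* alpha, const void* A, const void* B,
                                                const void* beta, const void* C, void* D,
                                                void* workspace, size_t workspaceSize, cudaStream_t stream) {
  cublasLtMatmulHeuristicResult_t results[8];
  int found = 0;
  cublasStatus_t st = cublasLtMatmulAlgoGetHeuristic(handle, op, Adesc, Bdesc, Cdesc, Ddesc,
                                                     pref, 8, results, &found);
  if (st != CUBLAS_STATUS_SUCCESS || found == 0) return CUBLAS_STATUS_NOT_SUPPORTED;

  /* results[0] is the fastest candidate according to the heuristic */
  return cublasLtMatmul(handle, op, alpha, A, Adesc, B, Bdesc, beta, C, Cdesc,
                        D, Ddesc, &results[0].algo, workspace, workspaceSize, stream);
}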
1434
+ /* ---------------------------------------------------------------------------------------*/
1435
+ /* Lower level API to be able to implement own Heuristic and Find routines */
1436
+ /* ---------------------------------------------------------------------------------------*/
1437
+
1438
+ /** Routine to get all algo IDs that can potentially run
1439
+ *
1440
+ * \param[in] requestedAlgoCount requested number of algos (must be less than or equal to the size of algoIdsArray, in elements)
+ * \param[out] algoIdsArray array to write algo IDs to
+ * \param[out] returnAlgoCount number of algo IDs actually written
1443
+ *
1444
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
1445
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect returnAlgoCount to get actual number of IDs
1446
+ * available
1447
+ */
1448
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetIds(cublasLtHandle_t lightHandle,
1449
+ cublasComputeType_t computeType,
1450
+ cudaDataType_t scaleType,
1451
+ cudaDataType_t Atype,
1452
+ cudaDataType_t Btype,
1453
+ cudaDataType_t Ctype,
1454
+ cudaDataType_t Dtype,
1455
+ int requestedAlgoCount,
1456
+ int algoIdsArray[],
1457
+ int* returnAlgoCount);
1458
+
1459
+ /** Initialize algo structure
1460
+ *
1461
+ * \retval CUBLAS_STATUS_INVALID_VALUE if algo is NULL or algoId is outside of recognized range
1462
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algoId is not supported for given combination of data types
1463
+ * \retval CUBLAS_STATUS_SUCCESS if the structure was successfully initialized
1464
+ */
1465
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoInit(cublasLtHandle_t lightHandle,
1466
+ cublasComputeType_t computeType,
1467
+ cudaDataType_t scaleType,
1468
+ cudaDataType_t Atype,
1469
+ cudaDataType_t Btype,
1470
+ cudaDataType_t Ctype,
1471
+ cudaDataType_t Dtype,
1472
+ int algoId,
1473
+ cublasLtMatmulAlgo_t* algo);
1474
+
1475
+ /** Check configured algo descriptor for correctness and support on current device.
1476
+ *
1477
+ * Result includes required workspace size and calculated wave count.
1478
+ *
1479
+ * CUBLAS_STATUS_SUCCESS doesn't fully guarantee the algo will run (it will fail if, e.g., buffers are not correctly aligned);
1480
+ * but if cublasLtMatmulAlgoCheck fails, the algo will not run.
1481
+ *
1482
+ * \param[in] algo algo configuration to check
1483
+ * \param[out] result result structure to report algo runtime characteristics; algo field is never updated
1484
+ *
1485
+ * \retval CUBLAS_STATUS_INVALID_VALUE if matrix layout descriptors or operation descriptor don't match algo
1486
+ * descriptor
1487
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algo configuration or data type combination is not currently supported on
1488
+ * given device
1489
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if algo configuration cannot be run using the selected device
1490
+ * \retval CUBLAS_STATUS_SUCCESS if check was successful
1491
+ */
1492
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCheck( //
1493
+ cublasLtHandle_t lightHandle,
1494
+ cublasLtMatmulDesc_t operationDesc,
1495
+ cublasLtMatrixLayout_t Adesc,
1496
+ cublasLtMatrixLayout_t Bdesc,
1497
+ cublasLtMatrixLayout_t Cdesc,
1498
+ cublasLtMatrixLayout_t Ddesc,
1499
+ const cublasLtMatmulAlgo_t* algo, ///< may point to result->algo
1500
+ cublasLtMatmulHeuristicResult_t* result);
1501
+
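Illustrative sketch of the lower-level path (not part of the original header): enumerate the algo IDs available for an all-FP32 problem, initialize the first one, and validate it against concrete descriptors. A real search would also iterate over configuration attributes (tile, stages, split-K, ...) before each check; types and counts here are placeholders.

static cublasStatus_t check_first_algo_sketch(cublasLtHandle_t handle, cublasLtMatmulDesc_t op,
                                              cublasLtMatrixLayout_t Adesc, cublasLtMatrixLayout_t Bdesc,
                                              cublasLtMatrixLayout_t Cdesc, cublasLtMatrixLayout_t Ddesc,
                                              cublasLtMatmulHeuristicResult_t* result) {
  int ids[16];
  int count = 0;
  cublasLtMatmulAlgo_t algo;
  cublasStatus_t st = cublasLtMatmulAlgoGetIds(handle, CUBLAS_COMPUTE_32F, CUDA_R_32F,
                                               CUDA_R_32F, CUDA_R_32F, CUDA_R_32F, CUDA_R_32F,
                                               16, ids, &count);
  if (st != CUBLAS_STATUS_SUCCESS || count == 0) return CUBLAS_STATUS_NOT_SUPPORTED;

  st = cublasLtMatmulAlgoInit(handle, CUBLAS_COMPUTE_32F, CUDA_R_32F,
                              CUDA_R_32F, CUDA_R_32F, CUDA_R_32F, CUDA_R_32F, ids[0], &algo);
  if (st != CUBLAS_STATUS_SUCCESS) return st;

  return cublasLtMatmulAlgoCheck(handle, op, Adesc, Bdesc, Cdesc, Ddesc, &algo, result);
}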
1502
+ /** Capabilities Attributes that can be retrieved from an initialized Algo structure
1503
+ */
1504
+ typedef enum {
1505
+ /** support for split K, see CUBLASLT_ALGO_CONFIG_SPLITK_NUM
1506
+ *
1507
+ * int32_t, 0 means no support, supported otherwise
1508
+ */
1509
+ CUBLASLT_ALGO_CAP_SPLITK_SUPPORT = 0,
1510
+
1511
+ /** reduction scheme mask, see cublasLtReductionScheme_t; shows supported reduction schemes, if reduction scheme is
1512
+ * not masked out it is supported.
1513
+ *
1514
+ * e.g. int isReductionSchemeComputeTypeSupported = (reductionSchemeMask & CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ==
+ * CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE ? 1 : 0;
1516
+ *
1517
+ * uint32_t
1518
+ */
1519
+ CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK = 1,
1520
+
1521
+ /** support for cta swizzling, see CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING
1522
+ *
1523
+ * uint32_t, 0 means no support, 1 means supported value of 1, other values are reserved
1524
+ */
1525
+ CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT = 2,
1526
+
1527
+ /** support strided batch
1528
+ *
1529
+ * int32_t, 0 means no support, supported otherwise
1530
+ */
1531
+ CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT = 3,
1532
+
1533
+ /** support results out of place (D != C in D = alpha.A.B + beta.C)
1534
+ *
1535
+ * int32_t, 0 means no support, supported otherwise
1536
+ */
1537
+ CUBLASLT_ALGO_CAP_OUT_OF_PLACE_RESULT_SUPPORT = 4,
1538
+
1539
+ /** syrk/herk support (on top of regular gemm)
1540
+ *
1541
+ * int32_t, 0 means no support, supported otherwise
1542
+ */
1543
+ CUBLASLT_ALGO_CAP_UPLO_SUPPORT = 5,
1544
+
1545
+ /** tile ids possible to use, see cublasLtMatmulTile_t; if no tile ids are supported use
1546
+ * CUBLASLT_MATMUL_TILE_UNDEFINED
1547
+ *
1548
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1549
+ *
1550
+ * array of uint32_t
1551
+ */
1552
+ CUBLASLT_ALGO_CAP_TILE_IDS = 6,
1553
+
1554
+ /** custom option range is from 0 to CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX (inclusive), see
1555
+ * CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION
1556
+ *
1557
+ * int32_t
1558
+ */
1559
+ CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX = 7,
1560
+
1561
+ /** whether algorithm supports custom (not COL or ROW memory order), see cublasLtOrder_t
1562
+ *
1563
+ * int32_t 0 means only COL and ROW memory order is allowed, non-zero means that algo might have different
1564
+ * requirements;
1565
+ */
1566
+ CUBLASLT_ALGO_CAP_CUSTOM_MEMORY_ORDER = 10,
1567
+
1568
+ /** bitmask enumerating pointer modes algorithm supports
1569
+ *
1570
+ * uint32_t, see cublasLtPointerModeMask_t
1571
+ */
1572
+ CUBLASLT_ALGO_CAP_POINTER_MODE_MASK = 11,
1573
+
1574
+ /** bitmask enumerating kinds of postprocessing algorithm supports in the epilogue
1575
+ *
1576
+ * uint32_t, see cublasLtEpilogue_t
1577
+ */
1578
+ CUBLASLT_ALGO_CAP_EPILOGUE_MASK = 12,
1579
+
1580
+ /** stages ids possible to use, see cublasLtMatmulStages_t; if no stages ids are supported use
1581
+ * CUBLASLT_MATMUL_STAGES_UNDEFINED
1582
+ *
1583
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1584
+ *
1585
+ * array of uint32_t
1586
+ */
1587
+ CUBLASLT_ALGO_CAP_STAGES_IDS = 13,
1588
+
1589
+ /** support for negative ld for all of the matrices
1590
+ *
1591
+ * int32_t 0 means no support, supported otherwise
1592
+ */
1593
+ CUBLASLT_ALGO_CAP_LD_NEGATIVE = 14,
1594
+
1595
+ /** details about the algorithm's implementation that affect its numerical behavior
1596
+ *
1597
+ * uint64_t, see cublasLtNumericalImplFlags_t
1598
+ */
1599
+ CUBLASLT_ALGO_CAP_NUMERICAL_IMPL_FLAGS = 15,
1600
+
1601
+ /** minimum alignment required for A matrix in bytes
1602
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1603
+ *
1604
+ * uint32_t
1605
+ */
1606
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_A_BYTES = 16,
1607
+
1608
+ /** minimum alignment required for B matrix in bytes
1609
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1610
+ *
1611
+ * uint32_t
1612
+ */
1613
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_B_BYTES = 17,
1614
+
1615
+ /** minimum alignment required for C matrix in bytes
1616
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1617
+ *
1618
+ * uint32_t
1619
+ */
1620
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_C_BYTES = 18,
1621
+
1622
+ /** minimum alignment required for D matrix in bytes
1623
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1624
+ *
1625
+ * uint32_t
1626
+ */
1627
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_D_BYTES = 19,
1628
+ } cublasLtMatmulAlgoCapAttributes_t;
1629
+
1630
+ /** Get algo capability attribute.
1631
+ *
1632
+ * E.g. to get list of supported Tile IDs:
1633
+ * cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END];
1634
+ * size_t num_tiles, size_written;
1635
+ * if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles, sizeof(tiles), &size_written) ==
1636
+ * CUBLAS_STATUS_SUCCESS) { num_tiles = size_written / sizeof(tiles[0]);
1637
+ * }
1638
+ *
1639
+ * \param[in] algo The algo descriptor
1640
+ * \param[in] attr The attribute
1641
+ * \param[out] buf memory address where the requested attribute value is written
1642
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1643
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1644
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1645
+ *
1646
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1647
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1648
+ * selected attribute
1649
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1650
+ */
1651
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCapGetAttribute(const cublasLtMatmulAlgo_t* algo,
1652
+ cublasLtMatmulAlgoCapAttributes_t attr,
1653
+ void* buf,
1654
+ size_t sizeInBytes,
1655
+ size_t* sizeWritten);
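/*
 * Editorial note: a compilable version of the tile-ID query sketched in the comment
 * above; not part of the header. `algo` is assumed to point to a structure previously
 * filled by cublasLtMatmulAlgoInit(); the function name is illustrative.
 */
static size_t queryTileIds(const cublasLtMatmulAlgo_t* algo,
                           cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END]) {
  size_t sizeWritten = 0;
  if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles,
                                        CUBLASLT_MATMUL_TILE_END * sizeof(tiles[0]),
                                        &sizeWritten) != CUBLAS_STATUS_SUCCESS)
    return 0;
  return sizeWritten / sizeof(tiles[0]); /* number of supported tile ids */
}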
1656
+
1657
+ /** Algo Configuration Attributes that can be set according to the Algo capabilities
1658
+ */
1659
+ typedef enum {
1660
+ /** algorithm index, see cublasLtMatmulAlgoGetIds()
1661
+ *
1662
+ * readonly, set by cublasLtMatmulAlgoInit()
1663
+ * int32_t
1664
+ */
1665
+ CUBLASLT_ALGO_CONFIG_ID = 0,
1666
+ /** tile id, see cublasLtMatmulTile_t
1667
+ *
1668
+ * uint32_t, default: CUBLASLT_MATMUL_TILE_UNDEFINED
1669
+ */
1670
+ CUBLASLT_ALGO_CONFIG_TILE_ID = 1,
1671
+ /** Number of K splits. If the number of K splits is greater than one, SPLITK_NUM parts
1672
+ * of matrix multiplication will be computed in parallel. The results will be accumulated
1673
+ * according to CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME
1674
+ *
1675
+ * int32_t, default: 1
1676
+ */
1677
+ CUBLASLT_ALGO_CONFIG_SPLITK_NUM = 2,
1678
+ /** reduction scheme, see cublasLtReductionScheme_t
1679
+ *
1680
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_NONE
1681
+ */
1682
+ CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME = 3,
1683
+ /** cta swizzling, change mapping from CUDA grid coordinates to parts of the matrices
1684
+ *
1685
+ * possible values: 0, 1, other values reserved
1686
+ *
1687
+ * uint32_t, default: 0
1688
+ */
1689
+ CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING = 4,
1690
+ /** custom option, each algorithm can support some custom options that don't fit description of the other config
1691
+ * attributes, see CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX to get accepted range for any specific case
1692
+ *
1693
+ * uint32_t, default: 0
1694
+ */
1695
+ CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION = 5,
1696
+ /** stages id, see cublasLtMatmulStages_t
1697
+ *
1698
+ * uint32_t, default: CUBLASLT_MATMUL_STAGES_UNDEFINED
1699
+ */
1700
+ CUBLASLT_ALGO_CONFIG_STAGES_ID = 6,
1701
+ /** inner shape id, see cublasLtMatmulInnerShape_t
1702
+ *
1703
+ * uint16_t, default: 0 (CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED)
1704
+ */
1705
+ CUBLASLT_ALGO_CONFIG_INNER_SHAPE_ID = 7,
1706
+ /** Thread Block Cluster shape id, see cublasLtClusterShape_t. Defines cluster size to use.
1707
+ *
1708
+ * uint16_t, default: 0 (CUBLASLT_CLUSTER_SHAPE_AUTO)
1709
+ */
1710
+ CUBLASLT_ALGO_CONFIG_CLUSTER_SHAPE_ID = 8,
1711
+ } cublasLtMatmulAlgoConfigAttributes_t;
1712
+
1713
+ /** Set algo configuration attribute.
1714
+ *
1715
+ * \param[in] algo The algo descriptor
1716
+ * \param[in] attr The attribute
1717
+ * \param[in] buf memory address containing the new value
1718
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1719
+ *
1720
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1721
+ * selected attribute
1722
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1723
+ */
1724
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigSetAttribute(cublasLtMatmulAlgo_t* algo,
1725
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1726
+ const void* buf,
1727
+ size_t sizeInBytes);
1728
+
1729
+ /** Get algo configuration attribute.
1730
+ *
1731
+ * \param[in] algo The algo descriptor
1732
+ * \param[in] attr The attribute
1733
+ * \param[out] buf memory address where the requested attribute value is written
1734
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1735
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1736
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1737
+ *
1738
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1739
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1740
+ * selected attribute
1741
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1742
+ */
1743
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigGetAttribute(const cublasLtMatmulAlgo_t* algo,
1744
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1745
+ void* buf,
1746
+ size_t sizeInBytes,
1747
+ size_t* sizeWritten);
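/*
 * Editorial note: a minimal sketch, not part of the header, of setting the split-K
 * attributes defined above on an already initialized algo. The values (4-way split,
 * reduction accumulated in compute type) are illustrative and should be validated
 * with cublasLtMatmulAlgoCheck() afterwards.
 */
static cublasStatus_t configureSplitK(cublasLtMatmulAlgo_t* algo) {
  const int32_t splitK = 4;
  const uint32_t reduction = CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE;
  cublasStatus_t status = cublasLtMatmulAlgoConfigSetAttribute(
      algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitK, sizeof(splitK));
  if (status != CUBLAS_STATUS_SUCCESS) return status;
  return cublasLtMatmulAlgoConfigSetAttribute(
      algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &reduction, sizeof(reduction));
}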
1748
+
1749
+ /** Experimental: Logger callback type.
1750
+ */
1751
+ typedef void (*cublasLtLoggerCallback_t)(int logLevel, const char* functionName, const char* message);
1752
+
1753
+ /** Experimental: Logger callback setter.
1754
+ *
1755
+ * \param[in] callback a user defined callback function to be called by the logger
1756
+ *
1757
+ * \retval CUBLAS_STATUS_SUCCESS if callback was set successfully
1758
+ */
1759
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetCallback(cublasLtLoggerCallback_t callback);
1760
+
1761
+ /** Experimental: Log file setter.
1762
+ *
1763
+ * \param[in] file an open file with write permissions
1764
+ *
1765
+ * \retval CUBLAS_STATUS_SUCCESS if log file was set successfully
1766
+ */
1767
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetFile(FILE* file);
1768
+
1769
+ /** Experimental: Open log file.
1770
+ *
1771
+ * \param[in] logFile log file path. if the log file does not exist, it will be created
1772
+ *
1773
+ * \retval CUBLAS_STATUS_SUCCESS if log file was created successfully
1774
+ */
1775
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerOpenFile(const char* logFile);
1776
+
1777
+ /** Experimental: Log level setter.
1778
+ *
1779
+ * \param[in] level log level, should be one of the following:
1780
+ * 0. Off
1781
+ * 1. Errors
1782
+ * 2. Performance Trace
1783
+ * 3. Performance Hints
1784
+ * 4. Heuristics Trace
1785
+ * 5. API Trace
1786
+ *
1787
+ * \retval CUBLAS_STATUS_INVALID_VALUE if log level is not one of the above levels
1788
+ *
1789
+ * \retval CUBLAS_STATUS_SUCCESS if log level was set successfully
1790
+ */
1791
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetLevel(int level);
1792
+
1793
+ /** Experimental: Log mask setter.
1794
+ *
1795
+ * \param[in] mask log mask, should be a combination of the following masks:
1796
+ * 0. Off
1797
+ * 1. Errors
1798
+ * 2. Performance Trace
1799
+ * 4. Performance Hints
1800
+ * 8. Heuristics Trace
1801
+ * 16. API Trace
1802
+ *
1803
+ * \retval CUBLAS_STATUS_SUCCESS if log mask was set successfully
1804
+ */
1805
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetMask(int mask);
1806
+
1807
+ /** Experimental: Disable logging for the entire session.
1808
+ *
1809
+ * \retval CUBLAS_STATUS_SUCCESS if disabled logging
1810
+ */
1811
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerForceDisable();
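/*
 * Editorial note: a minimal sketch, not part of the header, of the experimental
 * logging API declared above: route messages to a user callback and raise the log
 * level to API Trace (5). The callback and function names are illustrative; <stdio.h>
 * is needed for fprintf.
 */
static void myLtLogger(int logLevel, const char* functionName, const char* message) {
  fprintf(stderr, "[cublasLt:%d] %s: %s\n", logLevel, functionName, message);
}

static void enableLtLogging(void) {
  cublasLtLoggerSetCallback(myLtLogger);
  cublasLtLoggerSetLevel(5); /* 5 = API Trace, per the level list above */
}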
1812
+
1813
+ #if defined(__cplusplus)
1814
+ }
1815
+ #endif /* __cplusplus */
venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h ADDED
@@ -0,0 +1,693 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cublasXt : Host API, Out of Core and Multi-GPU BLAS Library
51
+
52
+ */
53
+
54
+ #if !defined(CUBLAS_XT_H_)
55
+ #define CUBLAS_XT_H_
56
+
57
+ #include "driver_types.h"
58
+ #include "cuComplex.h" /* import complex data type */
59
+
60
+ #include "cublas_v2.h"
61
+
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif /* __cplusplus */
65
+
66
+ struct cublasXtContext;
67
+ typedef struct cublasXtContext* cublasXtHandle_t;
68
+
69
+ cublasStatus_t CUBLASWINAPI cublasXtCreate(cublasXtHandle_t* handle);
70
+ cublasStatus_t CUBLASWINAPI cublasXtDestroy(cublasXtHandle_t handle);
71
+ cublasStatus_t CUBLASWINAPI cublasXtGetNumBoards(int nbDevices, int deviceId[], int* nbBoards);
72
+ cublasStatus_t CUBLASWINAPI cublasXtMaxBoards(int* nbGpuBoards);
73
+ /* This routine selects the GPUs that the user wants to use for CUBLAS-XT */
74
+ cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
75
+
76
+ /* This routine allows changing the dimension of the tiles (blockDim x blockDim) */
77
+ cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
78
+ cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
79
+
80
+ typedef enum { CUBLASXT_PINNING_DISABLED = 0, CUBLASXT_PINNING_ENABLED = 1 } cublasXtPinnedMemMode_t;
81
+ /* This routine allows CUBLAS-XT to pin the host memory if it finds out that some of the matrices passed
82
+ are not pinned: pinning/unpinning the host memory is still a costly operation.
83
+ It is better if the user controls the memory themselves (by pinning/unpinning only when necessary)
84
+ */
85
+ cublasStatus_t CUBLASWINAPI cublasXtGetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t* mode);
86
+ cublasStatus_t CUBLASWINAPI cublasXtSetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t mode);
87
+
88
+ /* These routines provide CPU BLAS routines, used for very small sizes or hybrid computation */
89
+ typedef enum {
90
+ CUBLASXT_FLOAT = 0,
91
+ CUBLASXT_DOUBLE = 1,
92
+ CUBLASXT_COMPLEX = 2,
93
+ CUBLASXT_DOUBLECOMPLEX = 3,
94
+ } cublasXtOpType_t;
95
+
96
+ typedef enum {
97
+ CUBLASXT_GEMM = 0,
98
+ CUBLASXT_SYRK = 1,
99
+ CUBLASXT_HERK = 2,
100
+ CUBLASXT_SYMM = 3,
101
+ CUBLASXT_HEMM = 4,
102
+ CUBLASXT_TRSM = 5,
103
+ CUBLASXT_SYR2K = 6,
104
+ CUBLASXT_HER2K = 7,
105
+
106
+ CUBLASXT_SPMM = 8,
107
+ CUBLASXT_SYRKX = 9,
108
+ CUBLASXT_HERKX = 10,
109
+ CUBLASXT_TRMM = 11,
110
+ CUBLASXT_ROUTINE_MAX = 12,
111
+ } cublasXtBlasOp_t;
112
+
113
+ /* Currently only 32-bit integer BLAS routines are supported */
114
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRoutine(cublasXtHandle_t handle,
115
+ cublasXtBlasOp_t blasOp,
116
+ cublasXtOpType_t type,
117
+ void* blasFunctor);
118
+
119
+ /* Specifies the percentage of work that should be done by the CPU, default is 0 (no work) */
120
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRatio(cublasXtHandle_t handle,
121
+ cublasXtBlasOp_t blasOp,
122
+ cublasXtOpType_t type,
123
+ float ratio);
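/*
 * Editorial note: a minimal sketch, not part of the header, of the hybrid CPU/GPU
 * hooks declared above. `hostDgemm` is assumed to be a pointer to a host BLAS dgemm
 * implementation supplied by the caller; routing 20% of the DGEMM work to the CPU is
 * purely illustrative.
 */
static cublasStatus_t enableHybridDgemm(cublasXtHandle_t handle, void* hostDgemm) {
  cublasStatus_t status =
      cublasXtSetCpuRoutine(handle, CUBLASXT_GEMM, CUBLASXT_DOUBLE, hostDgemm);
  if (status != CUBLAS_STATUS_SUCCESS) return status;
  return cublasXtSetCpuRatio(handle, CUBLASXT_GEMM, CUBLASXT_DOUBLE, 0.2f);
}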
124
+
125
+ /* GEMM */
126
+ cublasStatus_t CUBLASWINAPI cublasXtSgemm(cublasXtHandle_t handle,
127
+ cublasOperation_t transa,
128
+ cublasOperation_t transb,
129
+ size_t m,
130
+ size_t n,
131
+ size_t k,
132
+ const float* alpha,
133
+ const float* A,
134
+ size_t lda,
135
+ const float* B,
136
+ size_t ldb,
137
+ const float* beta,
138
+ float* C,
139
+ size_t ldc);
140
+
141
+ cublasStatus_t CUBLASWINAPI cublasXtDgemm(cublasXtHandle_t handle,
142
+ cublasOperation_t transa,
143
+ cublasOperation_t transb,
144
+ size_t m,
145
+ size_t n,
146
+ size_t k,
147
+ const double* alpha,
148
+ const double* A,
149
+ size_t lda,
150
+ const double* B,
151
+ size_t ldb,
152
+ const double* beta,
153
+ double* C,
154
+ size_t ldc);
155
+
156
+ cublasStatus_t CUBLASWINAPI cublasXtCgemm(cublasXtHandle_t handle,
157
+ cublasOperation_t transa,
158
+ cublasOperation_t transb,
159
+ size_t m,
160
+ size_t n,
161
+ size_t k,
162
+ const cuComplex* alpha,
163
+ const cuComplex* A,
164
+ size_t lda,
165
+ const cuComplex* B,
166
+ size_t ldb,
167
+ const cuComplex* beta,
168
+ cuComplex* C,
169
+ size_t ldc);
170
+
171
+ cublasStatus_t CUBLASWINAPI cublasXtZgemm(cublasXtHandle_t handle,
172
+ cublasOperation_t transa,
173
+ cublasOperation_t transb,
174
+ size_t m,
175
+ size_t n,
176
+ size_t k,
177
+ const cuDoubleComplex* alpha,
178
+ const cuDoubleComplex* A,
179
+ size_t lda,
180
+ const cuDoubleComplex* B,
181
+ size_t ldb,
182
+ const cuDoubleComplex* beta,
183
+ cuDoubleComplex* C,
184
+ size_t ldc);
185
+ /* ------------------------------------------------------- */
186
+ /* SYRK */
187
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrk(cublasXtHandle_t handle,
188
+ cublasFillMode_t uplo,
189
+ cublasOperation_t trans,
190
+ size_t n,
191
+ size_t k,
192
+ const float* alpha,
193
+ const float* A,
194
+ size_t lda,
195
+ const float* beta,
196
+ float* C,
197
+ size_t ldc);
198
+
199
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrk(cublasXtHandle_t handle,
200
+ cublasFillMode_t uplo,
201
+ cublasOperation_t trans,
202
+ size_t n,
203
+ size_t k,
204
+ const double* alpha,
205
+ const double* A,
206
+ size_t lda,
207
+ const double* beta,
208
+ double* C,
209
+ size_t ldc);
210
+
211
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrk(cublasXtHandle_t handle,
212
+ cublasFillMode_t uplo,
213
+ cublasOperation_t trans,
214
+ size_t n,
215
+ size_t k,
216
+ const cuComplex* alpha,
217
+ const cuComplex* A,
218
+ size_t lda,
219
+ const cuComplex* beta,
220
+ cuComplex* C,
221
+ size_t ldc);
222
+
223
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrk(cublasXtHandle_t handle,
224
+ cublasFillMode_t uplo,
225
+ cublasOperation_t trans,
226
+ size_t n,
227
+ size_t k,
228
+ const cuDoubleComplex* alpha,
229
+ const cuDoubleComplex* A,
230
+ size_t lda,
231
+ const cuDoubleComplex* beta,
232
+ cuDoubleComplex* C,
233
+ size_t ldc);
234
+ /* -------------------------------------------------------------------- */
235
+ /* HERK */
236
+ cublasStatus_t CUBLASWINAPI cublasXtCherk(cublasXtHandle_t handle,
237
+ cublasFillMode_t uplo,
238
+ cublasOperation_t trans,
239
+ size_t n,
240
+ size_t k,
241
+ const float* alpha,
242
+ const cuComplex* A,
243
+ size_t lda,
244
+ const float* beta,
245
+ cuComplex* C,
246
+ size_t ldc);
247
+
248
+ cublasStatus_t CUBLASWINAPI cublasXtZherk(cublasXtHandle_t handle,
249
+ cublasFillMode_t uplo,
250
+ cublasOperation_t trans,
251
+ size_t n,
252
+ size_t k,
253
+ const double* alpha,
254
+ const cuDoubleComplex* A,
255
+ size_t lda,
256
+ const double* beta,
257
+ cuDoubleComplex* C,
258
+ size_t ldc);
259
+ /* -------------------------------------------------------------------- */
260
+ /* SYR2K */
261
+ cublasStatus_t CUBLASWINAPI cublasXtSsyr2k(cublasXtHandle_t handle,
262
+ cublasFillMode_t uplo,
263
+ cublasOperation_t trans,
264
+ size_t n,
265
+ size_t k,
266
+ const float* alpha,
267
+ const float* A,
268
+ size_t lda,
269
+ const float* B,
270
+ size_t ldb,
271
+ const float* beta,
272
+ float* C,
273
+ size_t ldc);
274
+
275
+ cublasStatus_t CUBLASWINAPI cublasXtDsyr2k(cublasXtHandle_t handle,
276
+ cublasFillMode_t uplo,
277
+ cublasOperation_t trans,
278
+ size_t n,
279
+ size_t k,
280
+ const double* alpha,
281
+ const double* A,
282
+ size_t lda,
283
+ const double* B,
284
+ size_t ldb,
285
+ const double* beta,
286
+ double* C,
287
+ size_t ldc);
288
+
289
+ cublasStatus_t CUBLASWINAPI cublasXtCsyr2k(cublasXtHandle_t handle,
290
+ cublasFillMode_t uplo,
291
+ cublasOperation_t trans,
292
+ size_t n,
293
+ size_t k,
294
+ const cuComplex* alpha,
295
+ const cuComplex* A,
296
+ size_t lda,
297
+ const cuComplex* B,
298
+ size_t ldb,
299
+ const cuComplex* beta,
300
+ cuComplex* C,
301
+ size_t ldc);
302
+
303
+ cublasStatus_t CUBLASWINAPI cublasXtZsyr2k(cublasXtHandle_t handle,
304
+ cublasFillMode_t uplo,
305
+ cublasOperation_t trans,
306
+ size_t n,
307
+ size_t k,
308
+ const cuDoubleComplex* alpha,
309
+ const cuDoubleComplex* A,
310
+ size_t lda,
311
+ const cuDoubleComplex* B,
312
+ size_t ldb,
313
+ const cuDoubleComplex* beta,
314
+ cuDoubleComplex* C,
315
+ size_t ldc);
316
+ /* -------------------------------------------------------------------- */
317
+ /* HERKX : variant extension of HERK */
318
+ cublasStatus_t CUBLASWINAPI cublasXtCherkx(cublasXtHandle_t handle,
319
+ cublasFillMode_t uplo,
320
+ cublasOperation_t trans,
321
+ size_t n,
322
+ size_t k,
323
+ const cuComplex* alpha,
324
+ const cuComplex* A,
325
+ size_t lda,
326
+ const cuComplex* B,
327
+ size_t ldb,
328
+ const float* beta,
329
+ cuComplex* C,
330
+ size_t ldc);
331
+
332
+ cublasStatus_t CUBLASWINAPI cublasXtZherkx(cublasXtHandle_t handle,
333
+ cublasFillMode_t uplo,
334
+ cublasOperation_t trans,
335
+ size_t n,
336
+ size_t k,
337
+ const cuDoubleComplex* alpha,
338
+ const cuDoubleComplex* A,
339
+ size_t lda,
340
+ const cuDoubleComplex* B,
341
+ size_t ldb,
342
+ const double* beta,
343
+ cuDoubleComplex* C,
344
+ size_t ldc);
345
+
346
+ /* -------------------------------------------------------------------- */
347
+ /* TRSM */
348
+ cublasStatus_t CUBLASWINAPI cublasXtStrsm(cublasXtHandle_t handle,
349
+ cublasSideMode_t side,
350
+ cublasFillMode_t uplo,
351
+ cublasOperation_t trans,
352
+ cublasDiagType_t diag,
353
+ size_t m,
354
+ size_t n,
355
+ const float* alpha,
356
+ const float* A,
357
+ size_t lda,
358
+ float* B,
359
+ size_t ldb);
360
+
361
+ cublasStatus_t CUBLASWINAPI cublasXtDtrsm(cublasXtHandle_t handle,
362
+ cublasSideMode_t side,
363
+ cublasFillMode_t uplo,
364
+ cublasOperation_t trans,
365
+ cublasDiagType_t diag,
366
+ size_t m,
367
+ size_t n,
368
+ const double* alpha,
369
+ const double* A,
370
+ size_t lda,
371
+ double* B,
372
+ size_t ldb);
373
+
374
+ cublasStatus_t CUBLASWINAPI cublasXtCtrsm(cublasXtHandle_t handle,
375
+ cublasSideMode_t side,
376
+ cublasFillMode_t uplo,
377
+ cublasOperation_t trans,
378
+ cublasDiagType_t diag,
379
+ size_t m,
380
+ size_t n,
381
+ const cuComplex* alpha,
382
+ const cuComplex* A,
383
+ size_t lda,
384
+ cuComplex* B,
385
+ size_t ldb);
386
+
387
+ cublasStatus_t CUBLASWINAPI cublasXtZtrsm(cublasXtHandle_t handle,
388
+ cublasSideMode_t side,
389
+ cublasFillMode_t uplo,
390
+ cublasOperation_t trans,
391
+ cublasDiagType_t diag,
392
+ size_t m,
393
+ size_t n,
394
+ const cuDoubleComplex* alpha,
395
+ const cuDoubleComplex* A,
396
+ size_t lda,
397
+ cuDoubleComplex* B,
398
+ size_t ldb);
399
+ /* -------------------------------------------------------------------- */
400
+ /* SYMM : Symmetric Matrix Multiply */
401
+ cublasStatus_t CUBLASWINAPI cublasXtSsymm(cublasXtHandle_t handle,
402
+ cublasSideMode_t side,
403
+ cublasFillMode_t uplo,
404
+ size_t m,
405
+ size_t n,
406
+ const float* alpha,
407
+ const float* A,
408
+ size_t lda,
409
+ const float* B,
410
+ size_t ldb,
411
+ const float* beta,
412
+ float* C,
413
+ size_t ldc);
414
+
415
+ cublasStatus_t CUBLASWINAPI cublasXtDsymm(cublasXtHandle_t handle,
416
+ cublasSideMode_t side,
417
+ cublasFillMode_t uplo,
418
+ size_t m,
419
+ size_t n,
420
+ const double* alpha,
421
+ const double* A,
422
+ size_t lda,
423
+ const double* B,
424
+ size_t ldb,
425
+ const double* beta,
426
+ double* C,
427
+ size_t ldc);
428
+
429
+ cublasStatus_t CUBLASWINAPI cublasXtCsymm(cublasXtHandle_t handle,
430
+ cublasSideMode_t side,
431
+ cublasFillMode_t uplo,
432
+ size_t m,
433
+ size_t n,
434
+ const cuComplex* alpha,
435
+ const cuComplex* A,
436
+ size_t lda,
437
+ const cuComplex* B,
438
+ size_t ldb,
439
+ const cuComplex* beta,
440
+ cuComplex* C,
441
+ size_t ldc);
442
+
443
+ cublasStatus_t CUBLASWINAPI cublasXtZsymm(cublasXtHandle_t handle,
444
+ cublasSideMode_t side,
445
+ cublasFillMode_t uplo,
446
+ size_t m,
447
+ size_t n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* A,
450
+ size_t lda,
451
+ const cuDoubleComplex* B,
452
+ size_t ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* C,
455
+ size_t ldc);
456
+ /* -------------------------------------------------------------------- */
457
+ /* HEMM : Hermitian Matrix Multiply */
458
+ cublasStatus_t CUBLASWINAPI cublasXtChemm(cublasXtHandle_t handle,
459
+ cublasSideMode_t side,
460
+ cublasFillMode_t uplo,
461
+ size_t m,
462
+ size_t n,
463
+ const cuComplex* alpha,
464
+ const cuComplex* A,
465
+ size_t lda,
466
+ const cuComplex* B,
467
+ size_t ldb,
468
+ const cuComplex* beta,
469
+ cuComplex* C,
470
+ size_t ldc);
471
+
472
+ cublasStatus_t CUBLASWINAPI cublasXtZhemm(cublasXtHandle_t handle,
473
+ cublasSideMode_t side,
474
+ cublasFillMode_t uplo,
475
+ size_t m,
476
+ size_t n,
477
+ const cuDoubleComplex* alpha,
478
+ const cuDoubleComplex* A,
479
+ size_t lda,
480
+ const cuDoubleComplex* B,
481
+ size_t ldb,
482
+ const cuDoubleComplex* beta,
483
+ cuDoubleComplex* C,
484
+ size_t ldc);
485
+
486
+ /* -------------------------------------------------------------------- */
487
+ /* SYRKX : variant extension of SYRK */
488
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrkx(cublasXtHandle_t handle,
489
+ cublasFillMode_t uplo,
490
+ cublasOperation_t trans,
491
+ size_t n,
492
+ size_t k,
493
+ const float* alpha,
494
+ const float* A,
495
+ size_t lda,
496
+ const float* B,
497
+ size_t ldb,
498
+ const float* beta,
499
+ float* C,
500
+ size_t ldc);
501
+
502
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrkx(cublasXtHandle_t handle,
503
+ cublasFillMode_t uplo,
504
+ cublasOperation_t trans,
505
+ size_t n,
506
+ size_t k,
507
+ const double* alpha,
508
+ const double* A,
509
+ size_t lda,
510
+ const double* B,
511
+ size_t ldb,
512
+ const double* beta,
513
+ double* C,
514
+ size_t ldc);
515
+
516
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrkx(cublasXtHandle_t handle,
517
+ cublasFillMode_t uplo,
518
+ cublasOperation_t trans,
519
+ size_t n,
520
+ size_t k,
521
+ const cuComplex* alpha,
522
+ const cuComplex* A,
523
+ size_t lda,
524
+ const cuComplex* B,
525
+ size_t ldb,
526
+ const cuComplex* beta,
527
+ cuComplex* C,
528
+ size_t ldc);
529
+
530
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrkx(cublasXtHandle_t handle,
531
+ cublasFillMode_t uplo,
532
+ cublasOperation_t trans,
533
+ size_t n,
534
+ size_t k,
535
+ const cuDoubleComplex* alpha,
536
+ const cuDoubleComplex* A,
537
+ size_t lda,
538
+ const cuDoubleComplex* B,
539
+ size_t ldb,
540
+ const cuDoubleComplex* beta,
541
+ cuDoubleComplex* C,
542
+ size_t ldc);
543
+ /* -------------------------------------------------------------------- */
544
+ /* HER2K : variant extension of HERK */
545
+ cublasStatus_t CUBLASWINAPI cublasXtCher2k(cublasXtHandle_t handle,
546
+ cublasFillMode_t uplo,
547
+ cublasOperation_t trans,
548
+ size_t n,
549
+ size_t k,
550
+ const cuComplex* alpha,
551
+ const cuComplex* A,
552
+ size_t lda,
553
+ const cuComplex* B,
554
+ size_t ldb,
555
+ const float* beta,
556
+ cuComplex* C,
557
+ size_t ldc);
558
+
559
+ cublasStatus_t CUBLASWINAPI cublasXtZher2k(cublasXtHandle_t handle,
560
+ cublasFillMode_t uplo,
561
+ cublasOperation_t trans,
562
+ size_t n,
563
+ size_t k,
564
+ const cuDoubleComplex* alpha,
565
+ const cuDoubleComplex* A,
566
+ size_t lda,
567
+ const cuDoubleComplex* B,
568
+ size_t ldb,
569
+ const double* beta,
570
+ cuDoubleComplex* C,
571
+ size_t ldc);
572
+
573
+ /* -------------------------------------------------------------------- */
574
+ /* SPMM : Symmetric Packed Matrix Multiply */
575
+ cublasStatus_t CUBLASWINAPI cublasXtSspmm(cublasXtHandle_t handle,
576
+ cublasSideMode_t side,
577
+ cublasFillMode_t uplo,
578
+ size_t m,
579
+ size_t n,
580
+ const float* alpha,
581
+ const float* AP,
582
+ const float* B,
583
+ size_t ldb,
584
+ const float* beta,
585
+ float* C,
586
+ size_t ldc);
587
+
588
+ cublasStatus_t CUBLASWINAPI cublasXtDspmm(cublasXtHandle_t handle,
589
+ cublasSideMode_t side,
590
+ cublasFillMode_t uplo,
591
+ size_t m,
592
+ size_t n,
593
+ const double* alpha,
594
+ const double* AP,
595
+ const double* B,
596
+ size_t ldb,
597
+ const double* beta,
598
+ double* C,
599
+ size_t ldc);
600
+
601
+ cublasStatus_t CUBLASWINAPI cublasXtCspmm(cublasXtHandle_t handle,
602
+ cublasSideMode_t side,
603
+ cublasFillMode_t uplo,
604
+ size_t m,
605
+ size_t n,
606
+ const cuComplex* alpha,
607
+ const cuComplex* AP,
608
+ const cuComplex* B,
609
+ size_t ldb,
610
+ const cuComplex* beta,
611
+ cuComplex* C,
612
+ size_t ldc);
613
+
614
+ cublasStatus_t CUBLASWINAPI cublasXtZspmm(cublasXtHandle_t handle,
615
+ cublasSideMode_t side,
616
+ cublasFillMode_t uplo,
617
+ size_t m,
618
+ size_t n,
619
+ const cuDoubleComplex* alpha,
620
+ const cuDoubleComplex* AP,
621
+ const cuDoubleComplex* B,
622
+ size_t ldb,
623
+ const cuDoubleComplex* beta,
624
+ cuDoubleComplex* C,
625
+ size_t ldc);
626
+
627
+ /* -------------------------------------------------------------------- */
628
+ /* TRMM */
629
+ cublasStatus_t CUBLASWINAPI cublasXtStrmm(cublasXtHandle_t handle,
630
+ cublasSideMode_t side,
631
+ cublasFillMode_t uplo,
632
+ cublasOperation_t trans,
633
+ cublasDiagType_t diag,
634
+ size_t m,
635
+ size_t n,
636
+ const float* alpha,
637
+ const float* A,
638
+ size_t lda,
639
+ const float* B,
640
+ size_t ldb,
641
+ float* C,
642
+ size_t ldc);
643
+
644
+ cublasStatus_t CUBLASWINAPI cublasXtDtrmm(cublasXtHandle_t handle,
645
+ cublasSideMode_t side,
646
+ cublasFillMode_t uplo,
647
+ cublasOperation_t trans,
648
+ cublasDiagType_t diag,
649
+ size_t m,
650
+ size_t n,
651
+ const double* alpha,
652
+ const double* A,
653
+ size_t lda,
654
+ const double* B,
655
+ size_t ldb,
656
+ double* C,
657
+ size_t ldc);
658
+
659
+ cublasStatus_t CUBLASWINAPI cublasXtCtrmm(cublasXtHandle_t handle,
660
+ cublasSideMode_t side,
661
+ cublasFillMode_t uplo,
662
+ cublasOperation_t trans,
663
+ cublasDiagType_t diag,
664
+ size_t m,
665
+ size_t n,
666
+ const cuComplex* alpha,
667
+ const cuComplex* A,
668
+ size_t lda,
669
+ const cuComplex* B,
670
+ size_t ldb,
671
+ cuComplex* C,
672
+ size_t ldc);
673
+
674
+ cublasStatus_t CUBLASWINAPI cublasXtZtrmm(cublasXtHandle_t handle,
675
+ cublasSideMode_t side,
676
+ cublasFillMode_t uplo,
677
+ cublasOperation_t trans,
678
+ cublasDiagType_t diag,
679
+ size_t m,
680
+ size_t n,
681
+ const cuDoubleComplex* alpha,
682
+ const cuDoubleComplex* A,
683
+ size_t lda,
684
+ const cuDoubleComplex* B,
685
+ size_t ldb,
686
+ cuDoubleComplex* C,
687
+ size_t ldc);
688
+
689
+ #if defined(__cplusplus)
690
+ }
691
+ #endif /* __cplusplus */
692
+
693
+ #endif /* !defined(CUBLAS_XT_H_) */
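/*
 * Editorial note: a minimal end-to-end sketch, not part of the header, of the cublasXt
 * host API declared in this file: create a handle, select the devices, and run an SGEMM
 * directly on host pointers. A, B, C are assumed to be column-major host arrays of the
 * stated sizes; error handling is kept to a single status check for brevity.
 */
static cublasStatus_t xtSgemmExample(const float* A, const float* B, float* C,
                                     size_t m, size_t n, size_t k) {
  cublasXtHandle_t handle;
  int devices[1] = {0}; /* use GPU 0 only */
  const float alpha = 1.0f, beta = 0.0f;
  cublasStatus_t status = cublasXtCreate(&handle);
  if (status != CUBLAS_STATUS_SUCCESS) return status;
  cublasXtDeviceSelect(handle, 1, devices);
  cublasXtSetPinningMemMode(handle, CUBLASXT_PINNING_ENABLED); /* let cublasXt pin host memory */
  status = cublasXtSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                         &alpha, A, m, B, k, &beta, C, m);
  cublasXtDestroy(handle);
  return status;
}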
venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h ADDED
@@ -0,0 +1,478 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the new CUBLAS library API; it maps the generic
52
+ * cuBLAS function names to the actual _v2 implementations.
53
+ */
54
+
55
+ #if !defined(CUBLAS_V2_H_)
56
+ #define CUBLAS_V2_H_
57
+
58
+ #if defined(CUBLAS_H_)
59
+ #error "It is an error to include both cublas.h and cublas_v2.h"
60
+ #endif
61
+
62
+ #undef CUBLASAPI
63
+ #ifdef __CUDACC__
64
+ #define CUBLASAPI __host__ __device__
65
+ #else
66
+ #define CUBLASAPI
67
+ #endif
68
+
69
+ #include "cublas_api.h"
70
+
71
+ #define cublasCreate cublasCreate_v2
72
+ #define cublasDestroy cublasDestroy_v2
73
+ #define cublasGetVersion cublasGetVersion_v2
74
+ #define cublasSetWorkspace cublasSetWorkspace_v2
75
+ #define cublasSetStream cublasSetStream_v2
76
+ #define cublasGetStream cublasGetStream_v2
77
+ #define cublasGetPointerMode cublasGetPointerMode_v2
78
+ #define cublasSetPointerMode cublasSetPointerMode_v2
79
+
80
+ /* 32-bit integer */
81
+
82
+ /* Blas1 Routines */
83
+
84
+ #define cublasSnrm2 cublasSnrm2_v2
85
+ #define cublasDnrm2 cublasDnrm2_v2
86
+ #define cublasScnrm2 cublasScnrm2_v2
87
+ #define cublasDznrm2 cublasDznrm2_v2
88
+
89
+ #define cublasSdot cublasSdot_v2
90
+ #define cublasDdot cublasDdot_v2
91
+ #define cublasCdotu cublasCdotu_v2
92
+ #define cublasCdotc cublasCdotc_v2
93
+ #define cublasZdotu cublasZdotu_v2
94
+ #define cublasZdotc cublasZdotc_v2
95
+
96
+ #define cublasSscal cublasSscal_v2
97
+ #define cublasDscal cublasDscal_v2
98
+ #define cublasCscal cublasCscal_v2
99
+ #define cublasCsscal cublasCsscal_v2
100
+ #define cublasZscal cublasZscal_v2
101
+ #define cublasZdscal cublasZdscal_v2
102
+
103
+ #define cublasSaxpy cublasSaxpy_v2
104
+ #define cublasDaxpy cublasDaxpy_v2
105
+ #define cublasCaxpy cublasCaxpy_v2
106
+ #define cublasZaxpy cublasZaxpy_v2
107
+
108
+ #define cublasScopy cublasScopy_v2
109
+ #define cublasDcopy cublasDcopy_v2
110
+ #define cublasCcopy cublasCcopy_v2
111
+ #define cublasZcopy cublasZcopy_v2
112
+
113
+ #define cublasSswap cublasSswap_v2
114
+ #define cublasDswap cublasDswap_v2
115
+ #define cublasCswap cublasCswap_v2
116
+ #define cublasZswap cublasZswap_v2
117
+
118
+ #define cublasIsamax cublasIsamax_v2
119
+ #define cublasIdamax cublasIdamax_v2
120
+ #define cublasIcamax cublasIcamax_v2
121
+ #define cublasIzamax cublasIzamax_v2
122
+
123
+ #define cublasIsamin cublasIsamin_v2
124
+ #define cublasIdamin cublasIdamin_v2
125
+ #define cublasIcamin cublasIcamin_v2
126
+ #define cublasIzamin cublasIzamin_v2
127
+
128
+ #define cublasSasum cublasSasum_v2
129
+ #define cublasDasum cublasDasum_v2
130
+ #define cublasScasum cublasScasum_v2
131
+ #define cublasDzasum cublasDzasum_v2
132
+
133
+ #define cublasSrot cublasSrot_v2
134
+ #define cublasDrot cublasDrot_v2
135
+ #define cublasCrot cublasCrot_v2
136
+ #define cublasCsrot cublasCsrot_v2
137
+ #define cublasZrot cublasZrot_v2
138
+ #define cublasZdrot cublasZdrot_v2
139
+
140
+ #define cublasSrotg cublasSrotg_v2
141
+ #define cublasDrotg cublasDrotg_v2
142
+ #define cublasCrotg cublasCrotg_v2
143
+ #define cublasZrotg cublasZrotg_v2
144
+
145
+ #define cublasSrotm cublasSrotm_v2
146
+ #define cublasDrotm cublasDrotm_v2
147
+
148
+ #define cublasSrotmg cublasSrotmg_v2
149
+ #define cublasDrotmg cublasDrotmg_v2
150
+
151
+ /* Blas2 Routines */
152
+
153
+ #define cublasSgemv cublasSgemv_v2
154
+ #define cublasDgemv cublasDgemv_v2
155
+ #define cublasCgemv cublasCgemv_v2
156
+ #define cublasZgemv cublasZgemv_v2
157
+
158
+ #define cublasSgbmv cublasSgbmv_v2
159
+ #define cublasDgbmv cublasDgbmv_v2
160
+ #define cublasCgbmv cublasCgbmv_v2
161
+ #define cublasZgbmv cublasZgbmv_v2
162
+
163
+ #define cublasStrmv cublasStrmv_v2
164
+ #define cublasDtrmv cublasDtrmv_v2
165
+ #define cublasCtrmv cublasCtrmv_v2
166
+ #define cublasZtrmv cublasZtrmv_v2
167
+
168
+ #define cublasStbmv cublasStbmv_v2
169
+ #define cublasDtbmv cublasDtbmv_v2
170
+ #define cublasCtbmv cublasCtbmv_v2
171
+ #define cublasZtbmv cublasZtbmv_v2
172
+
173
+ #define cublasStpmv cublasStpmv_v2
174
+ #define cublasDtpmv cublasDtpmv_v2
175
+ #define cublasCtpmv cublasCtpmv_v2
176
+ #define cublasZtpmv cublasZtpmv_v2
177
+
178
+ #define cublasStrsv cublasStrsv_v2
179
+ #define cublasDtrsv cublasDtrsv_v2
180
+ #define cublasCtrsv cublasCtrsv_v2
181
+ #define cublasZtrsv cublasZtrsv_v2
182
+
183
+ #define cublasStpsv cublasStpsv_v2
184
+ #define cublasDtpsv cublasDtpsv_v2
185
+ #define cublasCtpsv cublasCtpsv_v2
186
+ #define cublasZtpsv cublasZtpsv_v2
187
+
188
+ #define cublasStbsv cublasStbsv_v2
189
+ #define cublasDtbsv cublasDtbsv_v2
190
+ #define cublasCtbsv cublasCtbsv_v2
191
+ #define cublasZtbsv cublasZtbsv_v2
192
+
193
+ #define cublasSsymv cublasSsymv_v2
194
+ #define cublasDsymv cublasDsymv_v2
195
+ #define cublasCsymv cublasCsymv_v2
196
+ #define cublasZsymv cublasZsymv_v2
197
+ #define cublasChemv cublasChemv_v2
198
+ #define cublasZhemv cublasZhemv_v2
199
+
200
+ #define cublasSsbmv cublasSsbmv_v2
201
+ #define cublasDsbmv cublasDsbmv_v2
202
+ #define cublasChbmv cublasChbmv_v2
203
+ #define cublasZhbmv cublasZhbmv_v2
204
+
205
+ #define cublasSspmv cublasSspmv_v2
206
+ #define cublasDspmv cublasDspmv_v2
207
+ #define cublasChpmv cublasChpmv_v2
208
+ #define cublasZhpmv cublasZhpmv_v2
209
+
210
+ #define cublasSger cublasSger_v2
211
+ #define cublasDger cublasDger_v2
212
+ #define cublasCgeru cublasCgeru_v2
213
+ #define cublasCgerc cublasCgerc_v2
214
+ #define cublasZgeru cublasZgeru_v2
215
+ #define cublasZgerc cublasZgerc_v2
216
+
217
+ #define cublasSsyr cublasSsyr_v2
218
+ #define cublasDsyr cublasDsyr_v2
219
+ #define cublasCsyr cublasCsyr_v2
220
+ #define cublasZsyr cublasZsyr_v2
221
+ #define cublasCher cublasCher_v2
222
+ #define cublasZher cublasZher_v2
223
+
224
+ #define cublasSspr cublasSspr_v2
225
+ #define cublasDspr cublasDspr_v2
226
+ #define cublasChpr cublasChpr_v2
227
+ #define cublasZhpr cublasZhpr_v2
228
+
229
+ #define cublasSsyr2 cublasSsyr2_v2
230
+ #define cublasDsyr2 cublasDsyr2_v2
231
+ #define cublasCsyr2 cublasCsyr2_v2
232
+ #define cublasZsyr2 cublasZsyr2_v2
233
+ #define cublasCher2 cublasCher2_v2
234
+ #define cublasZher2 cublasZher2_v2
235
+
236
+ #define cublasSspr2 cublasSspr2_v2
237
+ #define cublasDspr2 cublasDspr2_v2
238
+ #define cublasChpr2 cublasChpr2_v2
239
+ #define cublasZhpr2 cublasZhpr2_v2
240
+
241
+ /* Blas3 Routines */
242
+
243
+ #define cublasSgemm cublasSgemm_v2
244
+ #define cublasDgemm cublasDgemm_v2
245
+ #define cublasCgemm cublasCgemm_v2
246
+ #define cublasZgemm cublasZgemm_v2
247
+
248
+ #define cublasSsyrk cublasSsyrk_v2
249
+ #define cublasDsyrk cublasDsyrk_v2
250
+ #define cublasCsyrk cublasCsyrk_v2
251
+ #define cublasZsyrk cublasZsyrk_v2
252
+ #define cublasCherk cublasCherk_v2
253
+ #define cublasZherk cublasZherk_v2
254
+
255
+ #define cublasSsyr2k cublasSsyr2k_v2
256
+ #define cublasDsyr2k cublasDsyr2k_v2
257
+ #define cublasCsyr2k cublasCsyr2k_v2
258
+ #define cublasZsyr2k cublasZsyr2k_v2
259
+ #define cublasCher2k cublasCher2k_v2
260
+ #define cublasZher2k cublasZher2k_v2
261
+
262
+ #define cublasSsymm cublasSsymm_v2
263
+ #define cublasDsymm cublasDsymm_v2
264
+ #define cublasCsymm cublasCsymm_v2
265
+ #define cublasZsymm cublasZsymm_v2
266
+ #define cublasChemm cublasChemm_v2
267
+ #define cublasZhemm cublasZhemm_v2
268
+
269
+ #define cublasStrsm cublasStrsm_v2
270
+ #define cublasDtrsm cublasDtrsm_v2
271
+ #define cublasCtrsm cublasCtrsm_v2
272
+ #define cublasZtrsm cublasZtrsm_v2
273
+
274
+ #define cublasStrmm cublasStrmm_v2
275
+ #define cublasDtrmm cublasDtrmm_v2
276
+ #define cublasCtrmm cublasCtrmm_v2
277
+ #define cublasZtrmm cublasZtrmm_v2
278
+
279
+ /* 64-bit integer */
280
+
281
+ /* Blas1 Routines */
282
+
283
+ #define cublasSnrm2_64 cublasSnrm2_v2_64
284
+ #define cublasDnrm2_64 cublasDnrm2_v2_64
285
+ #define cublasScnrm2_64 cublasScnrm2_v2_64
286
+ #define cublasDznrm2_64 cublasDznrm2_v2_64
287
+
288
+ #define cublasSdot_64 cublasSdot_v2_64
289
+ #define cublasDdot_64 cublasDdot_v2_64
290
+ #define cublasCdotu_64 cublasCdotu_v2_64
291
+ #define cublasCdotc_64 cublasCdotc_v2_64
292
+ #define cublasZdotu_64 cublasZdotu_v2_64
293
+ #define cublasZdotc_64 cublasZdotc_v2_64
294
+
295
+ #define cublasSscal_64 cublasSscal_v2_64
296
+ #define cublasDscal_64 cublasDscal_v2_64
297
+ #define cublasCscal_64 cublasCscal_v2_64
298
+ #define cublasCsscal_64 cublasCsscal_v2_64
299
+ #define cublasZscal_64 cublasZscal_v2_64
300
+ #define cublasZdscal_64 cublasZdscal_v2_64
301
+
302
+ #define cublasSaxpy_64 cublasSaxpy_v2_64
303
+ #define cublasDaxpy_64 cublasDaxpy_v2_64
304
+ #define cublasCaxpy_64 cublasCaxpy_v2_64
305
+ #define cublasZaxpy_64 cublasZaxpy_v2_64
306
+
307
+ #define cublasScopy_64 cublasScopy_v2_64
308
+ #define cublasDcopy_64 cublasDcopy_v2_64
309
+ #define cublasCcopy_64 cublasCcopy_v2_64
310
+ #define cublasZcopy_64 cublasZcopy_v2_64
311
+
312
+ #define cublasSswap_64 cublasSswap_v2_64
313
+ #define cublasDswap_64 cublasDswap_v2_64
314
+ #define cublasCswap_64 cublasCswap_v2_64
315
+ #define cublasZswap_64 cublasZswap_v2_64
316
+
317
+ #define cublasIsamax_64 cublasIsamax_v2_64
318
+ #define cublasIdamax_64 cublasIdamax_v2_64
319
+ #define cublasIcamax_64 cublasIcamax_v2_64
320
+ #define cublasIzamax_64 cublasIzamax_v2_64
321
+
322
+ #define cublasIsamin_64 cublasIsamin_v2_64
323
+ #define cublasIdamin_64 cublasIdamin_v2_64
324
+ #define cublasIcamin_64 cublasIcamin_v2_64
325
+ #define cublasIzamin_64 cublasIzamin_v2_64
326
+
327
+ #define cublasSasum_64 cublasSasum_v2_64
328
+ #define cublasDasum_64 cublasDasum_v2_64
329
+ #define cublasScasum_64 cublasScasum_v2_64
330
+ #define cublasDzasum_64 cublasDzasum_v2_64
331
+
332
+ #define cublasSrot_64 cublasSrot_v2_64
333
+ #define cublasDrot_64 cublasDrot_v2_64
334
+ #define cublasCrot_64 cublasCrot_v2_64
335
+ #define cublasCsrot_64 cublasCsrot_v2_64
336
+ #define cublasZrot_64 cublasZrot_v2_64
337
+ #define cublasZdrot_64 cublasZdrot_v2_64
338
+
339
+ #define cublasSrotg_64 cublasSrotg_v2_64
340
+ #define cublasDrotg_64 cublasDrotg_v2_64
341
+ #define cublasCrotg_64 cublasCrotg_v2_64
342
+ #define cublasZrotg_64 cublasZrotg_v2_64
343
+
344
+ #define cublasSrotm_64 cublasSrotm_v2_64
345
+ #define cublasDrotm_64 cublasDrotm_v2_64
346
+
347
+ #define cublasSrotmg_64 cublasSrotmg_v2_64
348
+ #define cublasDrotmg_64 cublasDrotmg_v2_64
349
+
350
+ /* Blas2 Routines */
351
+
352
+ #define cublasSgemv_64 cublasSgemv_v2_64
353
+ #define cublasDgemv_64 cublasDgemv_v2_64
354
+ #define cublasCgemv_64 cublasCgemv_v2_64
355
+ #define cublasZgemv_64 cublasZgemv_v2_64
356
+
357
+ #define cublasSgbmv_64 cublasSgbmv_v2_64
358
+ #define cublasDgbmv_64 cublasDgbmv_v2_64
359
+ #define cublasCgbmv_64 cublasCgbmv_v2_64
360
+ #define cublasZgbmv_64 cublasZgbmv_v2_64
361
+
362
+ #define cublasStrmv_64 cublasStrmv_v2_64
363
+ #define cublasDtrmv_64 cublasDtrmv_v2_64
364
+ #define cublasCtrmv_64 cublasCtrmv_v2_64
365
+ #define cublasZtrmv_64 cublasZtrmv_v2_64
366
+
367
+ #define cublasStbmv_64 cublasStbmv_v2_64
368
+ #define cublasDtbmv_64 cublasDtbmv_v2_64
369
+ #define cublasCtbmv_64 cublasCtbmv_v2_64
370
+ #define cublasZtbmv_64 cublasZtbmv_v2_64
371
+
372
+ #define cublasStpmv_64 cublasStpmv_v2_64
373
+ #define cublasDtpmv_64 cublasDtpmv_v2_64
374
+ #define cublasCtpmv_64 cublasCtpmv_v2_64
375
+ #define cublasZtpmv_64 cublasZtpmv_v2_64
376
+
377
+ #define cublasStrsv_64 cublasStrsv_v2_64
378
+ #define cublasDtrsv_64 cublasDtrsv_v2_64
379
+ #define cublasCtrsv_64 cublasCtrsv_v2_64
380
+ #define cublasZtrsv_64 cublasZtrsv_v2_64
381
+
382
+ #define cublasStpsv_64 cublasStpsv_v2_64
383
+ #define cublasDtpsv_64 cublasDtpsv_v2_64
384
+ #define cublasCtpsv_64 cublasCtpsv_v2_64
385
+ #define cublasZtpsv_64 cublasZtpsv_v2_64
386
+
387
+ #define cublasStbsv_64 cublasStbsv_v2_64
388
+ #define cublasDtbsv_64 cublasDtbsv_v2_64
389
+ #define cublasCtbsv_64 cublasCtbsv_v2_64
390
+ #define cublasZtbsv_64 cublasZtbsv_v2_64
391
+
392
+ #define cublasSsymv_64 cublasSsymv_v2_64
393
+ #define cublasDsymv_64 cublasDsymv_v2_64
394
+ #define cublasCsymv_64 cublasCsymv_v2_64
395
+ #define cublasZsymv_64 cublasZsymv_v2_64
396
+ #define cublasChemv_64 cublasChemv_v2_64
397
+ #define cublasZhemv_64 cublasZhemv_v2_64
398
+
399
+ #define cublasSsbmv_64 cublasSsbmv_v2_64
400
+ #define cublasDsbmv_64 cublasDsbmv_v2_64
401
+ #define cublasChbmv_64 cublasChbmv_v2_64
402
+ #define cublasZhbmv_64 cublasZhbmv_v2_64
403
+
404
+ #define cublasSspmv_64 cublasSspmv_v2_64
405
+ #define cublasDspmv_64 cublasDspmv_v2_64
406
+ #define cublasChpmv_64 cublasChpmv_v2_64
407
+ #define cublasZhpmv_64 cublasZhpmv_v2_64
408
+
409
+ #define cublasSger_64 cublasSger_v2_64
410
+ #define cublasDger_64 cublasDger_v2_64
411
+ #define cublasCgeru_64 cublasCgeru_v2_64
412
+ #define cublasCgerc_64 cublasCgerc_v2_64
413
+ #define cublasZgeru_64 cublasZgeru_v2_64
414
+ #define cublasZgerc_64 cublasZgerc_v2_64
415
+
416
+ #define cublasSsyr_64 cublasSsyr_v2_64
417
+ #define cublasDsyr_64 cublasDsyr_v2_64
418
+ #define cublasCsyr_64 cublasCsyr_v2_64
419
+ #define cublasZsyr_64 cublasZsyr_v2_64
420
+ #define cublasCher_64 cublasCher_v2_64
421
+ #define cublasZher_64 cublasZher_v2_64
422
+
423
+ #define cublasSspr_64 cublasSspr_v2_64
424
+ #define cublasDspr_64 cublasDspr_v2_64
425
+ #define cublasChpr_64 cublasChpr_v2_64
426
+ #define cublasZhpr_64 cublasZhpr_v2_64
427
+
428
+ #define cublasSsyr2_64 cublasSsyr2_v2_64
429
+ #define cublasDsyr2_64 cublasDsyr2_v2_64
430
+ #define cublasCsyr2_64 cublasCsyr2_v2_64
431
+ #define cublasZsyr2_64 cublasZsyr2_v2_64
432
+ #define cublasCher2_64 cublasCher2_v2_64
433
+ #define cublasZher2_64 cublasZher2_v2_64
434
+
435
+ #define cublasSspr2_64 cublasSspr2_v2_64
436
+ #define cublasDspr2_64 cublasDspr2_v2_64
437
+ #define cublasChpr2_64 cublasChpr2_v2_64
438
+ #define cublasZhpr2_64 cublasZhpr2_v2_64
439
+
440
+ /* Blas3 Routines */
441
+
442
+ #define cublasSgemm_64 cublasSgemm_v2_64
443
+ #define cublasDgemm_64 cublasDgemm_v2_64
444
+ #define cublasCgemm_64 cublasCgemm_v2_64
445
+ #define cublasZgemm_64 cublasZgemm_v2_64
446
+
447
+ #define cublasSsyrk_64 cublasSsyrk_v2_64
448
+ #define cublasDsyrk_64 cublasDsyrk_v2_64
449
+ #define cublasCsyrk_64 cublasCsyrk_v2_64
450
+ #define cublasZsyrk_64 cublasZsyrk_v2_64
451
+ #define cublasCherk_64 cublasCherk_v2_64
452
+ #define cublasZherk_64 cublasZherk_v2_64
453
+
454
+ #define cublasSsyr2k_64 cublasSsyr2k_v2_64
455
+ #define cublasDsyr2k_64 cublasDsyr2k_v2_64
456
+ #define cublasCsyr2k_64 cublasCsyr2k_v2_64
457
+ #define cublasZsyr2k_64 cublasZsyr2k_v2_64
458
+ #define cublasCher2k_64 cublasCher2k_v2_64
459
+ #define cublasZher2k_64 cublasZher2k_v2_64
460
+
461
+ #define cublasSsymm_64 cublasSsymm_v2_64
462
+ #define cublasDsymm_64 cublasDsymm_v2_64
463
+ #define cublasCsymm_64 cublasCsymm_v2_64
464
+ #define cublasZsymm_64 cublasZsymm_v2_64
465
+ #define cublasChemm_64 cublasChemm_v2_64
466
+ #define cublasZhemm_64 cublasZhemm_v2_64
467
+
468
+ #define cublasStrsm_64 cublasStrsm_v2_64
469
+ #define cublasDtrsm_64 cublasDtrsm_v2_64
470
+ #define cublasCtrsm_64 cublasCtrsm_v2_64
471
+ #define cublasZtrsm_64 cublasZtrsm_v2_64
472
+
473
+ #define cublasStrmm_64 cublasStrmm_v2_64
474
+ #define cublasDtrmm_64 cublasDtrmm_v2_64
475
+ #define cublasCtrmm_64 cublasCtrmm_v2_64
476
+ #define cublasZtrmm_64 cublasZtrmm_v2_64
477
+
478
+ #endif /* !defined(CUBLAS_V2_H_) */
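/*
 * Editorial note: a minimal sketch, not part of the header, illustrating the macro
 * mapping above: code that includes cublas_v2.h and calls cublasCreate()/cublasSgemm()
 * is compiled against the _v2 entry points. dA, dB, dC are assumed to be n x n
 * column-major device buffers; the function name is illustrative.
 */
static cublasStatus_t v2SgemmExample(int n, const float* dA, const float* dB, float* dC) {
  cublasHandle_t handle;
  const float alpha = 1.0f, beta = 0.0f;
  cublasStatus_t status = cublasCreate(&handle); /* expands to cublasCreate_v2 */
  if (status != CUBLAS_STATUS_SUCCESS) return status;
  status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
                       &alpha, dA, n, dB, n, &beta, dC, n); /* cublasSgemm_v2 */
  cublasDestroy(handle); /* expands to cublasDestroy_v2 */
  return status;
}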
venv/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h ADDED
@@ -0,0 +1,824 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(NVBLAS_H_)
51
+ #define NVBLAS_H_
52
+
53
+ #include "driver_types.h"
54
+ #include "cuComplex.h" /* import complex data type */
55
+
56
+ #if defined(__cplusplus)
57
+ extern "C" {
58
+ #endif
59
+
60
+ /* GEMM */
61
+ void sgemm_(const char* transa,
62
+ const char* transb,
63
+ const int* m,
64
+ const int* n,
65
+ const int* k,
66
+ const float* alpha,
67
+ const float* a,
68
+ const int* lda,
69
+ const float* b,
70
+ const int* ldb,
71
+ const float* beta,
72
+ float* c,
73
+ const int* ldc);
74
+
75
+ void dgemm_(const char* transa,
76
+ const char* transb,
77
+ const int* m,
78
+ const int* n,
79
+ const int* k,
80
+ const double* alpha,
81
+ const double* a,
82
+ const int* lda,
83
+ const double* b,
84
+ const int* ldb,
85
+ const double* beta,
86
+ double* c,
87
+ const int* ldc);
88
+
89
+ void cgemm_(const char* transa,
90
+ const char* transb,
91
+ const int* m,
92
+ const int* n,
93
+ const int* k,
94
+ const cuComplex* alpha,
95
+ const cuComplex* a,
96
+ const int* lda,
97
+ const cuComplex* b,
98
+ const int* ldb,
99
+ const cuComplex* beta,
100
+ cuComplex* c,
101
+ const int* ldc);
102
+
103
+ void zgemm_(const char* transa,
104
+ const char* transb,
105
+ const int* m,
106
+ const int* n,
107
+ const int* k,
108
+ const cuDoubleComplex* alpha,
109
+ const cuDoubleComplex* a,
110
+ const int* lda,
111
+ const cuDoubleComplex* b,
112
+ const int* ldb,
113
+ const cuDoubleComplex* beta,
114
+ cuDoubleComplex* c,
115
+ const int* ldc);
116
+
117
+ void sgemm(const char* transa,
118
+ const char* transb,
119
+ const int* m,
120
+ const int* n,
121
+ const int* k,
122
+ const float* alpha,
123
+ const float* a,
124
+ const int* lda,
125
+ const float* b,
126
+ const int* ldb,
127
+ const float* beta,
128
+ float* c,
129
+ const int* ldc);
130
+
131
+ void dgemm(const char* transa,
132
+ const char* transb,
133
+ const int* m,
134
+ const int* n,
135
+ const int* k,
136
+ const double* alpha,
137
+ const double* a,
138
+ const int* lda,
139
+ const double* b,
140
+ const int* ldb,
141
+ const double* beta,
142
+ double* c,
143
+ const int* ldc);
144
+
145
+ void cgemm(const char* transa,
146
+ const char* transb,
147
+ const int* m,
148
+ const int* n,
149
+ const int* k,
150
+ const cuComplex* alpha,
151
+ const cuComplex* a,
152
+ const int* lda,
153
+ const cuComplex* b,
154
+ const int* ldb,
155
+ const cuComplex* beta,
156
+ cuComplex* c,
157
+ const int* ldc);
158
+
159
+ void zgemm(const char* transa,
160
+ const char* transb,
161
+ const int* m,
162
+ const int* n,
163
+ const int* k,
164
+ const cuDoubleComplex* alpha,
165
+ const cuDoubleComplex* a,
166
+ const int* lda,
167
+ const cuDoubleComplex* b,
168
+ const int* ldb,
169
+ const cuDoubleComplex* beta,
170
+ cuDoubleComplex* c,
171
+ const int* ldc);
172
+
173
+ /* SYRK */
174
+ void ssyrk_(const char* uplo,
175
+ const char* trans,
176
+ const int* n,
177
+ const int* k,
178
+ const float* alpha,
179
+ const float* a,
180
+ const int* lda,
181
+ const float* beta,
182
+ float* c,
183
+ const int* ldc);
184
+
185
+ void dsyrk_(const char* uplo,
186
+ const char* trans,
187
+ const int* n,
188
+ const int* k,
189
+ const double* alpha,
190
+ const double* a,
191
+ const int* lda,
192
+ const double* beta,
193
+ double* c,
194
+ const int* ldc);
195
+
196
+ void csyrk_(const char* uplo,
197
+ const char* trans,
198
+ const int* n,
199
+ const int* k,
200
+ const cuComplex* alpha,
201
+ const cuComplex* a,
202
+ const int* lda,
203
+ const cuComplex* beta,
204
+ cuComplex* c,
205
+ const int* ldc);
206
+
207
+ void zsyrk_(const char* uplo,
208
+ const char* trans,
209
+ const int* n,
210
+ const int* k,
211
+ const cuDoubleComplex* alpha,
212
+ const cuDoubleComplex* a,
213
+ const int* lda,
214
+ const cuDoubleComplex* beta,
215
+ cuDoubleComplex* c,
216
+ const int* ldc);
217
+
218
+ void ssyrk(const char* uplo,
219
+ const char* trans,
220
+ const int* n,
221
+ const int* k,
222
+ const float* alpha,
223
+ const float* a,
224
+ const int* lda,
225
+ const float* beta,
226
+ float* c,
227
+ const int* ldc);
228
+
229
+ void dsyrk(const char* uplo,
230
+ const char* trans,
231
+ const int* n,
232
+ const int* k,
233
+ const double* alpha,
234
+ const double* a,
235
+ const int* lda,
236
+ const double* beta,
237
+ double* c,
238
+ const int* ldc);
239
+
240
+ void csyrk(const char* uplo,
241
+ const char* trans,
242
+ const int* n,
243
+ const int* k,
244
+ const cuComplex* alpha,
245
+ const cuComplex* a,
246
+ const int* lda,
247
+ const cuComplex* beta,
248
+ cuComplex* c,
249
+ const int* ldc);
250
+
251
+ void zsyrk(const char* uplo,
252
+ const char* trans,
253
+ const int* n,
254
+ const int* k,
255
+ const cuDoubleComplex* alpha,
256
+ const cuDoubleComplex* a,
257
+ const int* lda,
258
+ const cuDoubleComplex* beta,
259
+ cuDoubleComplex* c,
260
+ const int* ldc);
261
+
262
+ /* HERK */
263
+ void cherk_(const char* uplo,
264
+ const char* trans,
265
+ const int* n,
266
+ const int* k,
267
+ const float* alpha,
268
+ const cuComplex* a,
269
+ const int* lda,
270
+ const float* beta,
271
+ cuComplex* c,
272
+ const int* ldc);
273
+
274
+ void zherk_(const char* uplo,
275
+ const char* trans,
276
+ const int* n,
277
+ const int* k,
278
+ const double* alpha,
279
+ const cuDoubleComplex* a,
280
+ const int* lda,
281
+ const double* beta,
282
+ cuDoubleComplex* c,
283
+ const int* ldc);
284
+
285
+ void cherk(const char* uplo,
286
+ const char* trans,
287
+ const int* n,
288
+ const int* k,
289
+ const float* alpha,
290
+ const cuComplex* a,
291
+ const int* lda,
292
+ const float* beta,
293
+ cuComplex* c,
294
+ const int* ldc);
295
+
296
+ void zherk(const char* uplo,
297
+ const char* trans,
298
+ const int* n,
299
+ const int* k,
300
+ const double* alpha,
301
+ const cuDoubleComplex* a,
302
+ const int* lda,
303
+ const double* beta,
304
+ cuDoubleComplex* c,
305
+ const int* ldc);
306
+
307
+ /* TRSM */
308
+ void strsm_(const char* side,
309
+ const char* uplo,
310
+ const char* transa,
311
+ const char* diag,
312
+ const int* m,
313
+ const int* n,
314
+ const float* alpha,
315
+ const float* a,
316
+ const int* lda,
317
+ float* b,
318
+ const int* ldb);
319
+
320
+ void dtrsm_(const char* side,
321
+ const char* uplo,
322
+ const char* transa,
323
+ const char* diag,
324
+ const int* m,
325
+ const int* n,
326
+ const double* alpha,
327
+ const double* a,
328
+ const int* lda,
329
+ double* b,
330
+ const int* ldb);
331
+
332
+ void ctrsm_(const char* side,
333
+ const char* uplo,
334
+ const char* transa,
335
+ const char* diag,
336
+ const int* m,
337
+ const int* n,
338
+ const cuComplex* alpha,
339
+ const cuComplex* a,
340
+ const int* lda,
341
+ cuComplex* b,
342
+ const int* ldb);
343
+
344
+ void ztrsm_(const char* side,
345
+ const char* uplo,
346
+ const char* transa,
347
+ const char* diag,
348
+ const int* m,
349
+ const int* n,
350
+ const cuDoubleComplex* alpha,
351
+ const cuDoubleComplex* a,
352
+ const int* lda,
353
+ cuDoubleComplex* b,
354
+ const int* ldb);
355
+
356
+ void strsm(const char* side,
357
+ const char* uplo,
358
+ const char* transa,
359
+ const char* diag,
360
+ const int* m,
361
+ const int* n,
362
+ const float* alpha,
363
+ const float* a,
364
+ const int* lda,
365
+ float* b,
366
+ const int* ldb);
367
+
368
+ void dtrsm(const char* side,
369
+ const char* uplo,
370
+ const char* transa,
371
+ const char* diag,
372
+ const int* m,
373
+ const int* n,
374
+ const double* alpha,
375
+ const double* a,
376
+ const int* lda,
377
+ double* b,
378
+ const int* ldb);
379
+
380
+ void ctrsm(const char* side,
381
+ const char* uplo,
382
+ const char* transa,
383
+ const char* diag,
384
+ const int* m,
385
+ const int* n,
386
+ const cuComplex* alpha,
387
+ const cuComplex* a,
388
+ const int* lda,
389
+ cuComplex* b,
390
+ const int* ldb);
391
+
392
+ void ztrsm(const char* side,
393
+ const char* uplo,
394
+ const char* transa,
395
+ const char* diag,
396
+ const int* m,
397
+ const int* n,
398
+ const cuDoubleComplex* alpha,
399
+ const cuDoubleComplex* a,
400
+ const int* lda,
401
+ cuDoubleComplex* b,
402
+ const int* ldb);
403
+
404
+ /* SYMM */
405
+ void ssymm_(const char* side,
406
+ const char* uplo,
407
+ const int* m,
408
+ const int* n,
409
+ const float* alpha,
410
+ const float* a,
411
+ const int* lda,
412
+ const float* b,
413
+ const int* ldb,
414
+ const float* beta,
415
+ float* c,
416
+ const int* ldc);
417
+
418
+ void dsymm_(const char* side,
419
+ const char* uplo,
420
+ const int* m,
421
+ const int* n,
422
+ const double* alpha,
423
+ const double* a,
424
+ const int* lda,
425
+ const double* b,
426
+ const int* ldb,
427
+ const double* beta,
428
+ double* c,
429
+ const int* ldc);
430
+
431
+ void csymm_(const char* side,
432
+ const char* uplo,
433
+ const int* m,
434
+ const int* n,
435
+ const cuComplex* alpha,
436
+ const cuComplex* a,
437
+ const int* lda,
438
+ const cuComplex* b,
439
+ const int* ldb,
440
+ const cuComplex* beta,
441
+ cuComplex* c,
442
+ const int* ldc);
443
+
444
+ void zsymm_(const char* side,
445
+ const char* uplo,
446
+ const int* m,
447
+ const int* n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* a,
450
+ const int* lda,
451
+ const cuDoubleComplex* b,
452
+ const int* ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* c,
455
+ const int* ldc);
456
+
457
+ void ssymm(const char* side,
458
+ const char* uplo,
459
+ const int* m,
460
+ const int* n,
461
+ const float* alpha,
462
+ const float* a,
463
+ const int* lda,
464
+ const float* b,
465
+ const int* ldb,
466
+ const float* beta,
467
+ float* c,
468
+ const int* ldc);
469
+
470
+ void dsymm(const char* side,
471
+ const char* uplo,
472
+ const int* m,
473
+ const int* n,
474
+ const double* alpha,
475
+ const double* a,
476
+ const int* lda,
477
+ const double* b,
478
+ const int* ldb,
479
+ const double* beta,
480
+ double* c,
481
+ const int* ldc);
482
+
483
+ void csymm(const char* side,
484
+ const char* uplo,
485
+ const int* m,
486
+ const int* n,
487
+ const cuComplex* alpha,
488
+ const cuComplex* a,
489
+ const int* lda,
490
+ const cuComplex* b,
491
+ const int* ldb,
492
+ const cuComplex* beta,
493
+ cuComplex* c,
494
+ const int* ldc);
495
+
496
+ void zsymm(const char* side,
497
+ const char* uplo,
498
+ const int* m,
499
+ const int* n,
500
+ const cuDoubleComplex* alpha,
501
+ const cuDoubleComplex* a,
502
+ const int* lda,
503
+ const cuDoubleComplex* b,
504
+ const int* ldb,
505
+ const cuDoubleComplex* beta,
506
+ cuDoubleComplex* c,
507
+ const int* ldc);
508
+
509
+ /* HEMM */
510
+ void chemm_(const char* side,
511
+ const char* uplo,
512
+ const int* m,
513
+ const int* n,
514
+ const cuComplex* alpha,
515
+ const cuComplex* a,
516
+ const int* lda,
517
+ const cuComplex* b,
518
+ const int* ldb,
519
+ const cuComplex* beta,
520
+ cuComplex* c,
521
+ const int* ldc);
522
+
523
+ void zhemm_(const char* side,
524
+ const char* uplo,
525
+ const int* m,
526
+ const int* n,
527
+ const cuDoubleComplex* alpha,
528
+ const cuDoubleComplex* a,
529
+ const int* lda,
530
+ const cuDoubleComplex* b,
531
+ const int* ldb,
532
+ const cuDoubleComplex* beta,
533
+ cuDoubleComplex* c,
534
+ const int* ldc);
535
+
536
+ /* HEMM with no underscore*/
537
+ void chemm(const char* side,
538
+ const char* uplo,
539
+ const int* m,
540
+ const int* n,
541
+ const cuComplex* alpha,
542
+ const cuComplex* a,
543
+ const int* lda,
544
+ const cuComplex* b,
545
+ const int* ldb,
546
+ const cuComplex* beta,
547
+ cuComplex* c,
548
+ const int* ldc);
549
+
550
+ void zhemm(const char* side,
551
+ const char* uplo,
552
+ const int* m,
553
+ const int* n,
554
+ const cuDoubleComplex* alpha,
555
+ const cuDoubleComplex* a,
556
+ const int* lda,
557
+ const cuDoubleComplex* b,
558
+ const int* ldb,
559
+ const cuDoubleComplex* beta,
560
+ cuDoubleComplex* c,
561
+ const int* ldc);
562
+
563
+ /* SYR2K */
564
+ void ssyr2k_(const char* uplo,
565
+ const char* trans,
566
+ const int* n,
567
+ const int* k,
568
+ const float* alpha,
569
+ const float* a,
570
+ const int* lda,
571
+ const float* b,
572
+ const int* ldb,
573
+ const float* beta,
574
+ float* c,
575
+ const int* ldc);
576
+
577
+ void dsyr2k_(const char* uplo,
578
+ const char* trans,
579
+ const int* n,
580
+ const int* k,
581
+ const double* alpha,
582
+ const double* a,
583
+ const int* lda,
584
+ const double* b,
585
+ const int* ldb,
586
+ const double* beta,
587
+ double* c,
588
+ const int* ldc);
589
+
590
+ void csyr2k_(const char* uplo,
591
+ const char* trans,
592
+ const int* n,
593
+ const int* k,
594
+ const cuComplex* alpha,
595
+ const cuComplex* a,
596
+ const int* lda,
597
+ const cuComplex* b,
598
+ const int* ldb,
599
+ const cuComplex* beta,
600
+ cuComplex* c,
601
+ const int* ldc);
602
+
603
+ void zsyr2k_(const char* uplo,
604
+ const char* trans,
605
+ const int* n,
606
+ const int* k,
607
+ const cuDoubleComplex* alpha,
608
+ const cuDoubleComplex* a,
609
+ const int* lda,
610
+ const cuDoubleComplex* b,
611
+ const int* ldb,
612
+ const cuDoubleComplex* beta,
613
+ cuDoubleComplex* c,
614
+ const int* ldc);
615
+
616
+ /* SYR2K no_underscore*/
617
+ void ssyr2k(const char* uplo,
618
+ const char* trans,
619
+ const int* n,
620
+ const int* k,
621
+ const float* alpha,
622
+ const float* a,
623
+ const int* lda,
624
+ const float* b,
625
+ const int* ldb,
626
+ const float* beta,
627
+ float* c,
628
+ const int* ldc);
629
+
630
+ void dsyr2k(const char* uplo,
631
+ const char* trans,
632
+ const int* n,
633
+ const int* k,
634
+ const double* alpha,
635
+ const double* a,
636
+ const int* lda,
637
+ const double* b,
638
+ const int* ldb,
639
+ const double* beta,
640
+ double* c,
641
+ const int* ldc);
642
+
643
+ void csyr2k(const char* uplo,
644
+ const char* trans,
645
+ const int* n,
646
+ const int* k,
647
+ const cuComplex* alpha,
648
+ const cuComplex* a,
649
+ const int* lda,
650
+ const cuComplex* b,
651
+ const int* ldb,
652
+ const cuComplex* beta,
653
+ cuComplex* c,
654
+ const int* ldc);
655
+
656
+ void zsyr2k(const char* uplo,
657
+ const char* trans,
658
+ const int* n,
659
+ const int* k,
660
+ const cuDoubleComplex* alpha,
661
+ const cuDoubleComplex* a,
662
+ const int* lda,
663
+ const cuDoubleComplex* b,
664
+ const int* ldb,
665
+ const cuDoubleComplex* beta,
666
+ cuDoubleComplex* c,
667
+ const int* ldc);
668
+
669
+ /* HERK */
670
+ void cher2k_(const char* uplo,
671
+ const char* trans,
672
+ const int* n,
673
+ const int* k,
674
+ const cuComplex* alpha,
675
+ const cuComplex* a,
676
+ const int* lda,
677
+ const cuComplex* b,
678
+ const int* ldb,
679
+ const float* beta,
680
+ cuComplex* c,
681
+ const int* ldc);
682
+
683
+ void zher2k_(const char* uplo,
684
+ const char* trans,
685
+ const int* n,
686
+ const int* k,
687
+ const cuDoubleComplex* alpha,
688
+ const cuDoubleComplex* a,
689
+ const int* lda,
690
+ const cuDoubleComplex* b,
691
+ const int* ldb,
692
+ const double* beta,
693
+ cuDoubleComplex* c,
694
+ const int* ldc);
695
+
696
+ /* HER2K with no underscore */
697
+ void cher2k(const char* uplo,
698
+ const char* trans,
699
+ const int* n,
700
+ const int* k,
701
+ const cuComplex* alpha,
702
+ const cuComplex* a,
703
+ const int* lda,
704
+ const cuComplex* b,
705
+ const int* ldb,
706
+ const float* beta,
707
+ cuComplex* c,
708
+ const int* ldc);
709
+
710
+ void zher2k(const char* uplo,
711
+ const char* trans,
712
+ const int* n,
713
+ const int* k,
714
+ const cuDoubleComplex* alpha,
715
+ const cuDoubleComplex* a,
716
+ const int* lda,
717
+ const cuDoubleComplex* b,
718
+ const int* ldb,
719
+ const double* beta,
720
+ cuDoubleComplex* c,
721
+ const int* ldc);
722
+
723
+ /* TRMM */
724
+ void strmm_(const char* side,
725
+ const char* uplo,
726
+ const char* transa,
727
+ const char* diag,
728
+ const int* m,
729
+ const int* n,
730
+ const float* alpha,
731
+ const float* a,
732
+ const int* lda,
733
+ float* b,
734
+ const int* ldb);
735
+
736
+ void dtrmm_(const char* side,
737
+ const char* uplo,
738
+ const char* transa,
739
+ const char* diag,
740
+ const int* m,
741
+ const int* n,
742
+ const double* alpha,
743
+ const double* a,
744
+ const int* lda,
745
+ double* b,
746
+ const int* ldb);
747
+
748
+ void ctrmm_(const char* side,
749
+ const char* uplo,
750
+ const char* transa,
751
+ const char* diag,
752
+ const int* m,
753
+ const int* n,
754
+ const cuComplex* alpha,
755
+ const cuComplex* a,
756
+ const int* lda,
757
+ cuComplex* b,
758
+ const int* ldb);
759
+
760
+ void ztrmm_(const char* side,
761
+ const char* uplo,
762
+ const char* transa,
763
+ const char* diag,
764
+ const int* m,
765
+ const int* n,
766
+ const cuDoubleComplex* alpha,
767
+ const cuDoubleComplex* a,
768
+ const int* lda,
769
+ cuDoubleComplex* b,
770
+ const int* ldb);
771
+
772
+ void strmm(const char* side,
773
+ const char* uplo,
774
+ const char* transa,
775
+ const char* diag,
776
+ const int* m,
777
+ const int* n,
778
+ const float* alpha,
779
+ const float* a,
780
+ const int* lda,
781
+ float* b,
782
+ const int* ldb);
783
+
784
+ void dtrmm(const char* side,
785
+ const char* uplo,
786
+ const char* transa,
787
+ const char* diag,
788
+ const int* m,
789
+ const int* n,
790
+ const double* alpha,
791
+ const double* a,
792
+ const int* lda,
793
+ double* b,
794
+ const int* ldb);
795
+
796
+ void ctrmm(const char* side,
797
+ const char* uplo,
798
+ const char* transa,
799
+ const char* diag,
800
+ const int* m,
801
+ const int* n,
802
+ const cuComplex* alpha,
803
+ const cuComplex* a,
804
+ const int* lda,
805
+ cuComplex* b,
806
+ const int* ldb);
807
+
808
+ void ztrmm(const char* side,
809
+ const char* uplo,
810
+ const char* transa,
811
+ const char* diag,
812
+ const int* m,
813
+ const int* n,
814
+ const cuDoubleComplex* alpha,
815
+ const cuDoubleComplex* a,
816
+ const int* lda,
817
+ cuDoubleComplex* b,
818
+ const int* ldb);
819
+
820
+ #if defined(__cplusplus)
821
+ }
822
+ #endif /* __cplusplus */
823
+
824
+ #endif /* !defined(NVBLAS_H_) */
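The nvblas.h header added above only declares Fortran-style, pointer-argument Level-3 BLAS entry points (in both the underscored and plain spellings), so host code calls them exactly as it would a reference BLAS. The sketch below is illustrative and not part of this commit; it assumes column-major storage and that libnvblas.so.12 is loaded ahead of the CPU BLAS (for example via LD_PRELOAD, with an nvblas.conf pointing at a fallback BLAS) so the call can be intercepted and offloaded.

/* dgemm through the nvblas.h interface -- a minimal sketch, not vendor code. */
#include <stdio.h>
#include <stdlib.h>

/* Same Fortran-style prototype as declared in nvblas.h above. */
void dgemm_(const char* transa, const char* transb,
            const int* m, const int* n, const int* k,
            const double* alpha, const double* a, const int* lda,
            const double* b, const int* ldb,
            const double* beta, double* c, const int* ldc);

int main(void) {
    const int m = 512, n = 512, k = 512;
    const double alpha = 1.0, beta = 0.0;
    double *a = malloc((size_t)m * k * sizeof *a);   /* column-major m x k */
    double *b = malloc((size_t)k * n * sizeof *b);   /* column-major k x n */
    double *c = malloc((size_t)m * n * sizeof *c);   /* column-major m x n */
    if (!a || !b || !c) return 1;
    for (int i = 0; i < m * k; ++i) a[i] = 1.0;
    for (int i = 0; i < k * n; ++i) b[i] = 2.0;

    /* C = alpha * op(A) * op(B) + beta * C; every argument is passed by pointer. */
    dgemm_("N", "N", &m, &n, &k, &alpha, a, &m, b, &k, &beta, c, &m);

    printf("c[0] = %f\n", c[0]);   /* expect 1.0 * 2.0 * 512 = 1024.0 */
    free(a); free(b); free(c);
    return 0;
}

Built against an ordinary CPU BLAS, the same source runs unchanged; routing the call to the GPU is purely a link- or preload-time decision, which is the point of the drop-in interface declared in this header.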
venv/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
 
venv/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 ADDED
Binary file (737 kB).
 
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
 
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h ADDED
@@ -0,0 +1,98 @@
1
+ /*
2
+ * Copyright 2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+
52
+ #if !defined(_CUPTI_OPENACC_H_)
53
+ #define _CUPTI_OPENACC_H_
54
+
55
+ #ifndef CUPTIAPI
56
+ #ifdef _WIN32
57
+ #define CUPTIAPI __stdcall
58
+ #else
59
+ #define CUPTIAPI
60
+ #endif
61
+ #endif
62
+
63
+ #if defined(__LP64__)
64
+ #define CUPTILP64 1
65
+ #elif defined(_WIN64)
66
+ #define CUPTILP64 1
67
+ #else
68
+ #undef CUPTILP64
69
+ #endif
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif
74
+
75
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
76
+ #pragma GCC visibility push(default)
77
+ #endif
78
+
79
+ /**
80
+ * \brief Initialize OpenACC support
81
+ *
82
+ * \param profRegister function of type acc_prof_reg as obtained from acc_register_library
83
+ * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library
84
+ * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library
85
+ */
86
+ CUptiResult CUPTIAPI
87
+ cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup);
88
+
89
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
90
+ #pragma GCC visibility pop
91
+ #endif
92
+
93
+ #if defined(__cplusplus)
94
+ }
95
+ #endif
96
+
97
+ #endif /*_CUPTI_OPENACC_H_*/
98
+
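For context on how the cuptiOpenACCInitialize entry point above is reached in practice: an OpenACC profiling runtime locates acc_register_library in the tool library and hands it the register/unregister/lookup callbacks, which the tool can forward directly to CUPTI. The sketch below is illustrative only and is not part of this commit; it mirrors the void* parameters of the prototype in this header rather than the typed acc_prof_* signatures from the OpenACC specification, and it assumes the CUPTI headers are on the include path.

/* Forwarding OpenACC profiling registration to CUPTI -- a minimal sketch. */
#include <stdio.h>
#include <cupti.h>                       /* CUptiResult, cuptiGetResultString */
#include "Openacc/cupti_openacc.h"

void acc_register_library(void *profRegister,
                          void *profUnregister,
                          void *profLookup)
{
    /* Hand the three callbacks straight to CUPTI, as documented above. */
    CUptiResult res = cuptiOpenACCInitialize(profRegister, profUnregister, profLookup);
    if (res != CUPTI_SUCCESS) {
        const char *msg = NULL;
        cuptiGetResultString(res, &msg);
        fprintf(stderr, "cuptiOpenACCInitialize failed: %s\n", msg ? msg : "unknown error");
    }
}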
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h ADDED
@@ -0,0 +1,100 @@
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+ #include "Openmp/omp-tools.h"
52
+
53
+ #if !defined(_CUPTI_OPENMP_H_)
54
+ #define _CUPTI_OPENMP_H_
55
+
56
+ #ifndef CUPTIAPI
57
+ #ifdef _WIN32
58
+ #define CUPTIAPI __stdcall
59
+ #else
60
+ #define CUPTIAPI
61
+ #endif
62
+ #endif
63
+
64
+ #if defined(__LP64__)
65
+ #define CUPTILP64 1
66
+ #elif defined(_WIN64)
67
+ #define CUPTILP64 1
68
+ #else
69
+ #undef CUPTILP64
70
+ #endif
71
+
72
+ #if defined(__cplusplus)
73
+ extern "C" {
74
+ #endif
75
+
76
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
77
+ #pragma GCC visibility push(default)
78
+ #endif
79
+
80
+ /**
81
+ * \brief Initialize OPENMP support (deprecated, used before OpenMP 5.0)
82
+ *
83
+ */
84
+ int CUPTIAPI cuptiOpenMpInitialize(ompt_function_lookup_t ompt_fn_lookup, const char *runtime_version, unsigned int ompt_version);
85
+
86
+ /**
87
+ * \brief Initialize OPENMP support
88
+ *
89
+ */
90
+ int CUPTIAPI cuptiOpenMpInitialize_v2(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data);
91
+
92
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
93
+ #pragma GCC visibility pop
94
+ #endif
95
+
96
+ #if defined(__cplusplus)
97
+ }
98
+ #endif
99
+
100
+ #endif /*_CUPTI_OPENMP_H_*/
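The two initializers above match the OMPT tool-startup signatures defined in omp-tools.h (added next in this commit): cuptiOpenMpInitialize_v2 has exactly the shape of an ompt_initialize_t callback. A hedged sketch of how a tool library might wire this up, assuming the standard ompt_start_tool startup protocol and that cuptiOpenMpInitialize_v2 returns non-zero on success (non-zero is what keeps an OMPT tool active):

/* OMPT tool entry point that defers initialization to CUPTI -- a sketch only. */
#include "Openmp/cupti_openmp.h"         /* also pulls in Openmp/omp-tools.h */

static int tool_initialize(ompt_function_lookup_t lookup,
                           int initial_device_num,
                           ompt_data_t *tool_data)
{
    /* Forward to CUPTI; assumed here to return non-zero on success. */
    return cuptiOpenMpInitialize_v2(lookup, initial_device_num, tool_data);
}

static void tool_finalize(ompt_data_t *tool_data)
{
    (void)tool_data;                     /* nothing extra to tear down in this sketch */
}

/* The OpenMP runtime looks this symbol up at startup (OMPT, OpenMP 5.0). */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version)
{
    static ompt_start_tool_result_t result;
    (void)omp_version;
    (void)runtime_version;
    result.initialize      = tool_initialize;
    result.finalize        = tool_finalize;
    result.tool_data.value = 0;
    return &result;
}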
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h ADDED
@@ -0,0 +1,1083 @@
1
+ /*
2
+ * include/50/omp-tools.h.var
3
+ */
4
+
5
+ //===----------------------------------------------------------------------===//
6
+ //
7
+ // The LLVM Compiler Infrastructure
8
+ //
9
+ // This file is dual licensed under the MIT and the University of Illinois Open
10
+ // Source Licenses. See LICENSE.txt for details.
11
+ //
12
+ //===----------------------------------------------------------------------===//
13
+
14
+ #ifndef __OMPT__
15
+ #define __OMPT__
16
+
17
+ /*****************************************************************************
18
+ * system include files
19
+ *****************************************************************************/
20
+
21
+ #include <stdint.h>
22
+ #include <stddef.h>
23
+
24
+ /*****************************************************************************
25
+ * iteration macros
26
+ *****************************************************************************/
27
+
28
+ #define FOREACH_OMPT_INQUIRY_FN(macro) \
29
+ macro (ompt_enumerate_states) \
30
+ macro (ompt_enumerate_mutex_impls) \
31
+ \
32
+ macro (ompt_set_callback) \
33
+ macro (ompt_get_callback) \
34
+ \
35
+ macro (ompt_get_state) \
36
+ \
37
+ macro (ompt_get_parallel_info) \
38
+ macro (ompt_get_task_info) \
39
+ macro (ompt_get_task_memory) \
40
+ macro (ompt_get_thread_data) \
41
+ macro (ompt_get_unique_id) \
42
+ macro (ompt_finalize_tool) \
43
+ \
44
+ macro(ompt_get_num_procs) \
45
+ macro(ompt_get_num_places) \
46
+ macro(ompt_get_place_proc_ids) \
47
+ macro(ompt_get_place_num) \
48
+ macro(ompt_get_partition_place_nums) \
49
+ macro(ompt_get_proc_id) \
50
+ \
51
+ macro(ompt_get_target_info) \
52
+ macro(ompt_get_num_devices)
53
+
54
+ #define FOREACH_OMPT_STATE(macro) \
55
+ \
56
+ /* first available state */ \
57
+ macro (ompt_state_undefined, 0x102) /* undefined thread state */ \
58
+ \
59
+ /* work states (0..15) */ \
60
+ macro (ompt_state_work_serial, 0x000) /* working outside parallel */ \
61
+ macro (ompt_state_work_parallel, 0x001) /* working within parallel */ \
62
+ macro (ompt_state_work_reduction, 0x002) /* performing a reduction */ \
63
+ \
64
+ /* barrier wait states (16..31) */ \
65
+ macro (ompt_state_wait_barrier, 0x010) /* waiting at a barrier */ \
66
+ macro (ompt_state_wait_barrier_implicit_parallel, 0x011) \
67
+ /* implicit barrier at the end of parallel region */\
68
+ macro (ompt_state_wait_barrier_implicit_workshare, 0x012) \
69
+ /* implicit barrier at the end of worksharing */ \
70
+ macro (ompt_state_wait_barrier_implicit, 0x013) /* implicit barrier */ \
71
+ macro (ompt_state_wait_barrier_explicit, 0x014) /* explicit barrier */ \
72
+ \
73
+ /* task wait states (32..63) */ \
74
+ macro (ompt_state_wait_taskwait, 0x020) /* waiting at a taskwait */ \
75
+ macro (ompt_state_wait_taskgroup, 0x021) /* waiting at a taskgroup */ \
76
+ \
77
+ /* mutex wait states (64..127) */ \
78
+ macro (ompt_state_wait_mutex, 0x040) \
79
+ macro (ompt_state_wait_lock, 0x041) /* waiting for lock */ \
80
+ macro (ompt_state_wait_critical, 0x042) /* waiting for critical */ \
81
+ macro (ompt_state_wait_atomic, 0x043) /* waiting for atomic */ \
82
+ macro (ompt_state_wait_ordered, 0x044) /* waiting for ordered */ \
83
+ \
84
+ /* target wait states (128..255) */ \
85
+ macro (ompt_state_wait_target, 0x080) /* waiting for target region */ \
86
+ macro (ompt_state_wait_target_map, 0x081) /* waiting for target data mapping operation */ \
87
+ macro (ompt_state_wait_target_update, 0x082) /* waiting for target update operation */ \
88
+ \
89
+ /* misc (256..511) */ \
90
+ macro (ompt_state_idle, 0x100) /* waiting for work */ \
91
+ macro (ompt_state_overhead, 0x101) /* overhead excluding wait states */ \
92
+ \
93
+ /* implementation-specific states (512..) */
94
+
95
+
96
+ #define FOREACH_KMP_MUTEX_IMPL(macro) \
97
+ macro (kmp_mutex_impl_none, 0) /* unknown implementation */ \
98
+ macro (kmp_mutex_impl_spin, 1) /* based on spin */ \
99
+ macro (kmp_mutex_impl_queuing, 2) /* based on some fair policy */ \
100
+ macro (kmp_mutex_impl_speculative, 3) /* based on HW-supported speculation */
101
+
102
+ #define FOREACH_OMPT_EVENT(macro) \
103
+ \
104
+ /*--- Mandatory Events ---*/ \
105
+ macro (ompt_callback_thread_begin, ompt_callback_thread_begin_t, 1) /* thread begin */ \
106
+ macro (ompt_callback_thread_end, ompt_callback_thread_end_t, 2) /* thread end */ \
107
+ \
108
+ macro (ompt_callback_parallel_begin, ompt_callback_parallel_begin_t, 3) /* parallel begin */ \
109
+ macro (ompt_callback_parallel_end, ompt_callback_parallel_end_t, 4) /* parallel end */ \
110
+ \
111
+ macro (ompt_callback_task_create, ompt_callback_task_create_t, 5) /* task begin */ \
112
+ macro (ompt_callback_task_schedule, ompt_callback_task_schedule_t, 6) /* task schedule */ \
113
+ macro (ompt_callback_implicit_task, ompt_callback_implicit_task_t, 7) /* implicit task */ \
114
+ \
115
+ macro (ompt_callback_target, ompt_callback_target_t, 8) /* target */ \
116
+ macro (ompt_callback_target_data_op, ompt_callback_target_data_op_t, 9) /* target data op */ \
117
+ macro (ompt_callback_target_submit, ompt_callback_target_submit_t, 10) /* target submit */ \
118
+ \
119
+ macro (ompt_callback_control_tool, ompt_callback_control_tool_t, 11) /* control tool */ \
120
+ \
121
+ macro (ompt_callback_device_initialize, ompt_callback_device_initialize_t, 12) /* device initialize */ \
122
+ macro (ompt_callback_device_finalize, ompt_callback_device_finalize_t, 13) /* device finalize */ \
123
+ \
124
+ macro (ompt_callback_device_load, ompt_callback_device_load_t, 14) /* device load */ \
125
+ macro (ompt_callback_device_unload, ompt_callback_device_unload_t, 15) /* device unload */ \
126
+ \
127
+ /* Optional Events */ \
128
+ macro (ompt_callback_sync_region_wait, ompt_callback_sync_region_t, 16) /* sync region wait begin or end */ \
129
+ \
130
+ macro (ompt_callback_mutex_released, ompt_callback_mutex_t, 17) /* mutex released */ \
131
+ \
132
+ macro (ompt_callback_dependences, ompt_callback_dependences_t, 18) /* report task dependences */ \
133
+ macro (ompt_callback_task_dependence, ompt_callback_task_dependence_t, 19) /* report task dependence */ \
134
+ \
135
+ macro (ompt_callback_work, ompt_callback_work_t, 20) /* task at work begin or end */ \
136
+ \
137
+ macro (ompt_callback_master, ompt_callback_master_t, 21) /* task at master begin or end */ \
138
+ \
139
+ macro (ompt_callback_target_map, ompt_callback_target_map_t, 22) /* target map */ \
140
+ \
141
+ macro (ompt_callback_sync_region, ompt_callback_sync_region_t, 23) /* sync region begin or end */ \
142
+ \
143
+ macro (ompt_callback_lock_init, ompt_callback_mutex_acquire_t, 24) /* lock init */ \
144
+ macro (ompt_callback_lock_destroy, ompt_callback_mutex_t, 25) /* lock destroy */ \
145
+ \
146
+ macro (ompt_callback_mutex_acquire, ompt_callback_mutex_acquire_t, 26) /* mutex acquire */ \
147
+ macro (ompt_callback_mutex_acquired, ompt_callback_mutex_t, 27) /* mutex acquired */ \
148
+ \
149
+ macro (ompt_callback_nest_lock, ompt_callback_nest_lock_t, 28) /* nest lock */ \
150
+ \
151
+ macro (ompt_callback_flush, ompt_callback_flush_t, 29) /* after executing flush */ \
152
+ \
153
+ macro (ompt_callback_cancel, ompt_callback_cancel_t, 30) /* cancel innermost binding region */ \
154
+ \
155
+ macro (ompt_callback_reduction, ompt_callback_sync_region_t, 31) /* reduction */ \
156
+ \
157
+ macro (ompt_callback_dispatch, ompt_callback_dispatch_t, 32) /* dispatch of work */
158
+
159
+ /*****************************************************************************
160
+ * implementation specific types
161
+ *****************************************************************************/
162
+
163
+ typedef enum kmp_mutex_impl_t {
164
+ #define kmp_mutex_impl_macro(impl, code) impl = code,
165
+ FOREACH_KMP_MUTEX_IMPL(kmp_mutex_impl_macro)
166
+ #undef kmp_mutex_impl_macro
167
+ } kmp_mutex_impl_t;
168
+
169
+ /*****************************************************************************
170
+ * definitions generated from spec
171
+ *****************************************************************************/
172
+
173
+ typedef enum ompt_callbacks_t {
174
+ ompt_callback_thread_begin = 1,
175
+ ompt_callback_thread_end = 2,
176
+ ompt_callback_parallel_begin = 3,
177
+ ompt_callback_parallel_end = 4,
178
+ ompt_callback_task_create = 5,
179
+ ompt_callback_task_schedule = 6,
180
+ ompt_callback_implicit_task = 7,
181
+ ompt_callback_target = 8,
182
+ ompt_callback_target_data_op = 9,
183
+ ompt_callback_target_submit = 10,
184
+ ompt_callback_control_tool = 11,
185
+ ompt_callback_device_initialize = 12,
186
+ ompt_callback_device_finalize = 13,
187
+ ompt_callback_device_load = 14,
188
+ ompt_callback_device_unload = 15,
189
+ ompt_callback_sync_region_wait = 16,
190
+ ompt_callback_mutex_released = 17,
191
+ ompt_callback_dependences = 18,
192
+ ompt_callback_task_dependence = 19,
193
+ ompt_callback_work = 20,
194
+ ompt_callback_master = 21,
195
+ ompt_callback_target_map = 22,
196
+ ompt_callback_sync_region = 23,
197
+ ompt_callback_lock_init = 24,
198
+ ompt_callback_lock_destroy = 25,
199
+ ompt_callback_mutex_acquire = 26,
200
+ ompt_callback_mutex_acquired = 27,
201
+ ompt_callback_nest_lock = 28,
202
+ ompt_callback_flush = 29,
203
+ ompt_callback_cancel = 30,
204
+ ompt_callback_reduction = 31,
205
+ ompt_callback_dispatch = 32
206
+ } ompt_callbacks_t;
207
+
208
+ typedef enum ompt_record_t {
209
+ ompt_record_ompt = 1,
210
+ ompt_record_native = 2,
211
+ ompt_record_invalid = 3
212
+ } ompt_record_t;
213
+
214
+ typedef enum ompt_record_native_t {
215
+ ompt_record_native_info = 1,
216
+ ompt_record_native_event = 2
217
+ } ompt_record_native_t;
218
+
219
+ typedef enum ompt_set_result_t {
220
+ ompt_set_error = 0,
221
+ ompt_set_never = 1,
222
+ ompt_set_impossible = 2,
223
+ ompt_set_sometimes = 3,
224
+ ompt_set_sometimes_paired = 4,
225
+ ompt_set_always = 5
226
+ } ompt_set_result_t;
227
+
228
+ typedef uint64_t ompt_id_t;
229
+
230
+ typedef uint64_t ompt_device_time_t;
231
+
232
+ typedef uint64_t ompt_buffer_cursor_t;
233
+
234
+ typedef enum ompt_thread_t {
235
+ ompt_thread_initial = 1,
236
+ ompt_thread_worker = 2,
237
+ ompt_thread_other = 3,
238
+ ompt_thread_unknown = 4
239
+ } ompt_thread_t;
240
+
241
+ typedef enum ompt_scope_endpoint_t {
242
+ ompt_scope_begin = 1,
243
+ ompt_scope_end = 2
244
+ } ompt_scope_endpoint_t;
245
+
246
+ typedef enum ompt_dispatch_t {
247
+ ompt_dispatch_iteration = 1,
248
+ ompt_dispatch_section = 2
249
+ } ompt_dispatch_t;
250
+
251
+ typedef enum ompt_sync_region_t {
252
+ ompt_sync_region_barrier = 1,
253
+ ompt_sync_region_barrier_implicit = 2,
254
+ ompt_sync_region_barrier_explicit = 3,
255
+ ompt_sync_region_barrier_implementation = 4,
256
+ ompt_sync_region_taskwait = 5,
257
+ ompt_sync_region_taskgroup = 6,
258
+ ompt_sync_region_reduction = 7
259
+ } ompt_sync_region_t;
260
+
261
+ typedef enum ompt_target_data_op_t {
262
+ ompt_target_data_alloc = 1,
263
+ ompt_target_data_transfer_to_device = 2,
264
+ ompt_target_data_transfer_from_device = 3,
265
+ ompt_target_data_delete = 4,
266
+ ompt_target_data_associate = 5,
267
+ ompt_target_data_disassociate = 6
268
+ } ompt_target_data_op_t;
269
+
270
+ typedef enum ompt_work_t {
271
+ ompt_work_loop = 1,
272
+ ompt_work_sections = 2,
273
+ ompt_work_single_executor = 3,
274
+ ompt_work_single_other = 4,
275
+ ompt_work_workshare = 5,
276
+ ompt_work_distribute = 6,
277
+ ompt_work_taskloop = 7
278
+ } ompt_work_t;
279
+
280
+ typedef enum ompt_mutex_t {
281
+ ompt_mutex_lock = 1,
282
+ ompt_mutex_test_lock = 2,
283
+ ompt_mutex_nest_lock = 3,
284
+ ompt_mutex_test_nest_lock = 4,
285
+ ompt_mutex_critical = 5,
286
+ ompt_mutex_atomic = 6,
287
+ ompt_mutex_ordered = 7
288
+ } ompt_mutex_t;
289
+
290
+ typedef enum ompt_native_mon_flag_t {
291
+ ompt_native_data_motion_explicit = 0x01,
292
+ ompt_native_data_motion_implicit = 0x02,
293
+ ompt_native_kernel_invocation = 0x04,
294
+ ompt_native_kernel_execution = 0x08,
295
+ ompt_native_driver = 0x10,
296
+ ompt_native_runtime = 0x20,
297
+ ompt_native_overhead = 0x40,
298
+ ompt_native_idleness = 0x80
299
+ } ompt_native_mon_flag_t;
300
+
301
+ typedef enum ompt_task_flag_t {
302
+ ompt_task_initial = 0x00000001,
303
+ ompt_task_implicit = 0x00000002,
304
+ ompt_task_explicit = 0x00000004,
305
+ ompt_task_target = 0x00000008,
306
+ ompt_task_undeferred = 0x08000000,
307
+ ompt_task_untied = 0x10000000,
308
+ ompt_task_final = 0x20000000,
309
+ ompt_task_mergeable = 0x40000000,
310
+ ompt_task_merged = 0x80000000
311
+ } ompt_task_flag_t;
312
+
313
+ typedef enum ompt_task_status_t {
314
+ ompt_task_complete = 1,
315
+ ompt_task_yield = 2,
316
+ ompt_task_cancel = 3,
317
+ ompt_task_detach = 4,
318
+ ompt_task_early_fulfill = 5,
319
+ ompt_task_late_fulfill = 6,
320
+ ompt_task_switch = 7
321
+ } ompt_task_status_t;
322
+
323
+ typedef enum ompt_target_t {
324
+ ompt_target = 1,
325
+ ompt_target_enter_data = 2,
326
+ ompt_target_exit_data = 3,
327
+ ompt_target_update = 4
328
+ } ompt_target_t;
329
+
330
+ typedef enum ompt_parallel_flag_t {
331
+ ompt_parallel_invoker_program = 0x00000001,
332
+ ompt_parallel_invoker_runtime = 0x00000002,
333
+ ompt_parallel_league = 0x40000000,
334
+ ompt_parallel_team = 0x80000000
335
+ } ompt_parallel_flag_t;
336
+
337
+ typedef enum ompt_target_map_flag_t {
338
+ ompt_target_map_flag_to = 0x01,
339
+ ompt_target_map_flag_from = 0x02,
340
+ ompt_target_map_flag_alloc = 0x04,
341
+ ompt_target_map_flag_release = 0x08,
342
+ ompt_target_map_flag_delete = 0x10,
343
+ ompt_target_map_flag_implicit = 0x20
344
+ } ompt_target_map_flag_t;
345
+
346
+ typedef enum ompt_dependence_type_t {
347
+ ompt_dependence_type_in = 1,
348
+ ompt_dependence_type_out = 2,
349
+ ompt_dependence_type_inout = 3,
350
+ ompt_dependence_type_mutexinoutset = 4,
351
+ ompt_dependence_type_source = 5,
352
+ ompt_dependence_type_sink = 6
353
+ } ompt_dependence_type_t;
354
+
355
+ typedef enum ompt_cancel_flag_t {
356
+ ompt_cancel_parallel = 0x01,
357
+ ompt_cancel_sections = 0x02,
358
+ ompt_cancel_loop = 0x04,
359
+ ompt_cancel_taskgroup = 0x08,
360
+ ompt_cancel_activated = 0x10,
361
+ ompt_cancel_detected = 0x20,
362
+ ompt_cancel_discarded_task = 0x40
363
+ } ompt_cancel_flag_t;
364
+
365
+ typedef uint64_t ompt_hwid_t;
366
+
367
+ typedef uint64_t ompt_wait_id_t;
368
+
369
+ typedef enum ompt_frame_flag_t {
370
+ ompt_frame_runtime = 0x00,
371
+ ompt_frame_application = 0x01,
372
+ ompt_frame_cfa = 0x10,
373
+ ompt_frame_framepointer = 0x20,
374
+ ompt_frame_stackaddress = 0x30
375
+ } ompt_frame_flag_t;
376
+
377
+ typedef enum ompt_state_t {
378
+ ompt_state_work_serial = 0x000,
379
+ ompt_state_work_parallel = 0x001,
380
+ ompt_state_work_reduction = 0x002,
381
+
382
+ ompt_state_wait_barrier = 0x010,
383
+ ompt_state_wait_barrier_implicit_parallel = 0x011,
384
+ ompt_state_wait_barrier_implicit_workshare = 0x012,
385
+ ompt_state_wait_barrier_implicit = 0x013,
386
+ ompt_state_wait_barrier_explicit = 0x014,
387
+
388
+ ompt_state_wait_taskwait = 0x020,
389
+ ompt_state_wait_taskgroup = 0x021,
390
+
391
+ ompt_state_wait_mutex = 0x040,
392
+ ompt_state_wait_lock = 0x041,
393
+ ompt_state_wait_critical = 0x042,
394
+ ompt_state_wait_atomic = 0x043,
395
+ ompt_state_wait_ordered = 0x044,
396
+
397
+ ompt_state_wait_target = 0x080,
398
+ ompt_state_wait_target_map = 0x081,
399
+ ompt_state_wait_target_update = 0x082,
400
+
401
+ ompt_state_idle = 0x100,
402
+ ompt_state_overhead = 0x101,
403
+ ompt_state_undefined = 0x102
404
+ } ompt_state_t;
405
+
406
+ typedef uint64_t (*ompt_get_unique_id_t) (void);
407
+
408
+ typedef uint64_t ompd_size_t;
409
+
410
+ typedef uint64_t ompd_wait_id_t;
411
+
412
+ typedef uint64_t ompd_addr_t;
413
+ typedef int64_t ompd_word_t;
414
+ typedef uint64_t ompd_seg_t;
415
+
416
+ typedef uint64_t ompd_device_t;
417
+
418
+ typedef uint64_t ompd_thread_id_t;
419
+
420
+ typedef enum ompd_scope_t {
421
+ ompd_scope_global = 1,
422
+ ompd_scope_address_space = 2,
423
+ ompd_scope_thread = 3,
424
+ ompd_scope_parallel = 4,
425
+ ompd_scope_implicit_task = 5,
426
+ ompd_scope_task = 6
427
+ } ompd_scope_t;
428
+
429
+ typedef uint64_t ompd_icv_id_t;
430
+
431
+ typedef enum ompd_rc_t {
432
+ ompd_rc_ok = 0,
433
+ ompd_rc_unavailable = 1,
434
+ ompd_rc_stale_handle = 2,
435
+ ompd_rc_bad_input = 3,
436
+ ompd_rc_error = 4,
437
+ ompd_rc_unsupported = 5,
438
+ ompd_rc_needs_state_tracking = 6,
439
+ ompd_rc_incompatible = 7,
440
+ ompd_rc_device_read_error = 8,
441
+ ompd_rc_device_write_error = 9,
442
+ ompd_rc_nomem = 10,
443
+ } ompd_rc_t;
444
+
445
+ typedef void (*ompt_interface_fn_t) (void);
446
+
447
+ typedef ompt_interface_fn_t (*ompt_function_lookup_t) (
448
+ const char *interface_function_name
449
+ );
450
+
451
+ typedef union ompt_data_t {
452
+ uint64_t value;
453
+ void *ptr;
454
+ } ompt_data_t;
455
+
456
+ typedef struct ompt_frame_t {
457
+ ompt_data_t exit_frame;
458
+ ompt_data_t enter_frame;
459
+ int exit_frame_flags;
460
+ int enter_frame_flags;
461
+ } ompt_frame_t;
462
+
463
+ typedef void (*ompt_callback_t) (void);
464
+
465
+ typedef void ompt_device_t;
466
+
467
+ typedef void ompt_buffer_t;
468
+
469
+ typedef void (*ompt_callback_buffer_request_t) (
470
+ int device_num,
471
+ ompt_buffer_t **buffer,
472
+ size_t *bytes
473
+ );
474
+
475
+ typedef void (*ompt_callback_buffer_complete_t) (
476
+ int device_num,
477
+ ompt_buffer_t *buffer,
478
+ size_t bytes,
479
+ ompt_buffer_cursor_t begin,
480
+ int buffer_owned
481
+ );
482
+
483
+ typedef void (*ompt_finalize_t) (
484
+ ompt_data_t *tool_data
485
+ );
486
+
487
+ typedef int (*ompt_initialize_t) (
488
+ ompt_function_lookup_t lookup,
489
+ int initial_device_num,
490
+ ompt_data_t *tool_data
491
+ );
492
+
493
+ typedef struct ompt_start_tool_result_t {
494
+ ompt_initialize_t initialize;
495
+ ompt_finalize_t finalize;
496
+ ompt_data_t tool_data;
497
+ } ompt_start_tool_result_t;
498
+
499
+ typedef struct ompt_record_abstract_t {
500
+ ompt_record_native_t rclass;
501
+ const char *type;
502
+ ompt_device_time_t start_time;
503
+ ompt_device_time_t end_time;
504
+ ompt_hwid_t hwid;
505
+ } ompt_record_abstract_t;
506
+
507
+ typedef struct ompt_dependence_t {
508
+ ompt_data_t variable;
509
+ ompt_dependence_type_t dependence_type;
510
+ } ompt_dependence_t;
511
+
512
+ typedef int (*ompt_enumerate_states_t) (
513
+ int current_state,
514
+ int *next_state,
515
+ const char **next_state_name
516
+ );
517
+
518
+ typedef int (*ompt_enumerate_mutex_impls_t) (
519
+ int current_impl,
520
+ int *next_impl,
521
+ const char **next_impl_name
522
+ );
523
+
524
+ typedef ompt_set_result_t (*ompt_set_callback_t) (
525
+ ompt_callbacks_t event,
526
+ ompt_callback_t callback
527
+ );
528
+
529
+ typedef int (*ompt_get_callback_t) (
530
+ ompt_callbacks_t event,
531
+ ompt_callback_t *callback
532
+ );
533
+
534
+ typedef ompt_data_t *(*ompt_get_thread_data_t) (void);
535
+
536
+ typedef int (*ompt_get_num_procs_t) (void);
537
+
538
+ typedef int (*ompt_get_num_places_t) (void);
539
+
540
+ typedef int (*ompt_get_place_proc_ids_t) (
541
+ int place_num,
542
+ int ids_size,
543
+ int *ids
544
+ );
545
+
546
+ typedef int (*ompt_get_place_num_t) (void);
547
+
548
+ typedef int (*ompt_get_partition_place_nums_t) (
549
+ int place_nums_size,
550
+ int *place_nums
551
+ );
552
+
553
+ typedef int (*ompt_get_proc_id_t) (void);
554
+
555
+ typedef int (*ompt_get_state_t) (
556
+ ompt_wait_id_t *wait_id
557
+ );
558
+
559
+ typedef int (*ompt_get_parallel_info_t) (
560
+ int ancestor_level,
561
+ ompt_data_t **parallel_data,
562
+ int *team_size
563
+ );
564
+
565
+ typedef int (*ompt_get_task_info_t) (
566
+ int ancestor_level,
567
+ int *flags,
568
+ ompt_data_t **task_data,
569
+ ompt_frame_t **task_frame,
570
+ ompt_data_t **parallel_data,
571
+ int *thread_num
572
+ );
573
+
574
+ typedef int (*ompt_get_task_memory_t)(
575
+ void **addr,
576
+ size_t *size,
577
+ int block
578
+ );
579
+
580
+ typedef int (*ompt_get_target_info_t) (
581
+ uint64_t *device_num,
582
+ ompt_id_t *target_id,
583
+ ompt_id_t *host_op_id
584
+ );
585
+
586
+ typedef int (*ompt_get_num_devices_t) (void);
587
+
588
+ typedef void (*ompt_finalize_tool_t) (void);
589
+
590
+ typedef int (*ompt_get_device_num_procs_t) (
591
+ ompt_device_t *device
592
+ );
593
+
594
+ typedef ompt_device_time_t (*ompt_get_device_time_t) (
595
+ ompt_device_t *device
596
+ );
597
+
598
+ typedef double (*ompt_translate_time_t) (
599
+ ompt_device_t *device,
600
+ ompt_device_time_t time
601
+ );
602
+
603
+ typedef ompt_set_result_t (*ompt_set_trace_ompt_t) (
604
+ ompt_device_t *device,
605
+ unsigned int enable,
606
+ unsigned int etype
607
+ );
608
+
609
+ typedef ompt_set_result_t (*ompt_set_trace_native_t) (
610
+ ompt_device_t *device,
611
+ int enable,
612
+ int flags
613
+ );
614
+
615
+ typedef int (*ompt_start_trace_t) (
616
+ ompt_device_t *device,
617
+ ompt_callback_buffer_request_t request,
618
+ ompt_callback_buffer_complete_t complete
619
+ );
620
+
621
+ typedef int (*ompt_pause_trace_t) (
622
+ ompt_device_t *device,
623
+ int begin_pause
624
+ );
625
+
626
+ typedef int (*ompt_flush_trace_t) (
627
+ ompt_device_t *device
628
+ );
629
+
630
+ typedef int (*ompt_stop_trace_t) (
631
+ ompt_device_t *device
632
+ );
633
+
634
+ typedef int (*ompt_advance_buffer_cursor_t) (
635
+ ompt_device_t *device,
636
+ ompt_buffer_t *buffer,
637
+ size_t size,
638
+ ompt_buffer_cursor_t current,
639
+ ompt_buffer_cursor_t *next
640
+ );
641
+
642
+ typedef ompt_record_t (*ompt_get_record_type_t) (
643
+ ompt_buffer_t *buffer,
644
+ ompt_buffer_cursor_t current
645
+ );
646
+
647
+ typedef void *(*ompt_get_record_native_t) (
648
+ ompt_buffer_t *buffer,
649
+ ompt_buffer_cursor_t current,
650
+ ompt_id_t *host_op_id
651
+ );
652
+
653
+ typedef ompt_record_abstract_t *
654
+ (*ompt_get_record_abstract_t) (
655
+ void *native_record
656
+ );
657
+
658
+ typedef void (*ompt_callback_thread_begin_t) (
659
+ ompt_thread_t thread_type,
660
+ ompt_data_t *thread_data
661
+ );
662
+
663
+ typedef struct ompt_record_thread_begin_t {
664
+ ompt_thread_t thread_type;
665
+ } ompt_record_thread_begin_t;
666
+
667
+ typedef void (*ompt_callback_thread_end_t) (
668
+ ompt_data_t *thread_data
669
+ );
670
+
671
+ typedef void (*ompt_callback_parallel_begin_t) (
672
+ ompt_data_t *encountering_task_data,
673
+ const ompt_frame_t *encountering_task_frame,
674
+ ompt_data_t *parallel_data,
675
+ unsigned int requested_parallelism,
676
+ int flags,
677
+ const void *codeptr_ra
678
+ );
679
+
680
+ typedef struct ompt_record_parallel_begin_t {
681
+ ompt_id_t encountering_task_id;
682
+ ompt_id_t parallel_id;
683
+ unsigned int requested_parallelism;
684
+ int flags;
685
+ const void *codeptr_ra;
686
+ } ompt_record_parallel_begin_t;
687
+
688
+ typedef void (*ompt_callback_parallel_end_t) (
689
+ ompt_data_t *parallel_data,
690
+ ompt_data_t *encountering_task_data,
691
+ int flags,
692
+ const void *codeptr_ra
693
+ );
694
+
695
+ typedef struct ompt_record_parallel_end_t {
696
+ ompt_id_t parallel_id;
697
+ ompt_id_t encountering_task_id;
698
+ int flags;
699
+ const void *codeptr_ra;
700
+ } ompt_record_parallel_end_t;
701
+
702
+ typedef void (*ompt_callback_work_t) (
703
+ ompt_work_t wstype,
704
+ ompt_scope_endpoint_t endpoint,
705
+ ompt_data_t *parallel_data,
706
+ ompt_data_t *task_data,
707
+ uint64_t count,
708
+ const void *codeptr_ra
709
+ );
710
+
711
+ typedef struct ompt_record_work_t {
712
+ ompt_work_t wstype;
713
+ ompt_scope_endpoint_t endpoint;
714
+ ompt_id_t parallel_id;
715
+ ompt_id_t task_id;
716
+ uint64_t count;
717
+ const void *codeptr_ra;
718
+ } ompt_record_work_t;
719
+
720
+ typedef void (*ompt_callback_dispatch_t) (
721
+ ompt_data_t *parallel_data,
722
+ ompt_data_t *task_data,
723
+ ompt_dispatch_t kind,
724
+ ompt_data_t instance
725
+ );
726
+
727
+ typedef struct ompt_record_dispatch_t {
728
+ ompt_id_t parallel_id;
729
+ ompt_id_t task_id;
730
+ ompt_dispatch_t kind;
731
+ ompt_data_t instance;
732
+ } ompt_record_dispatch_t;
733
+
734
+ typedef void (*ompt_callback_task_create_t) (
735
+ ompt_data_t *encountering_task_data,
736
+ const ompt_frame_t *encountering_task_frame,
737
+ ompt_data_t *new_task_data,
738
+ int flags,
739
+ int has_dependences,
740
+ const void *codeptr_ra
741
+ );
742
+
743
+ typedef struct ompt_record_task_create_t {
744
+ ompt_id_t encountering_task_id;
745
+ ompt_id_t new_task_id;
746
+ int flags;
747
+ int has_dependences;
748
+ const void *codeptr_ra;
749
+ } ompt_record_task_create_t;
750
+
751
+ typedef void (*ompt_callback_dependences_t) (
752
+ ompt_data_t *task_data,
753
+ const ompt_dependence_t *deps,
754
+ int ndeps
755
+ );
756
+
757
+ typedef struct ompt_record_dependences_t {
758
+ ompt_id_t task_id;
759
+ ompt_dependence_t dep;
760
+ int ndeps;
761
+ } ompt_record_dependences_t;
762
+
763
+ typedef void (*ompt_callback_task_dependence_t) (
764
+ ompt_data_t *src_task_data,
765
+ ompt_data_t *sink_task_data
766
+ );
767
+
768
+ typedef struct ompt_record_task_dependence_t {
769
+ ompt_id_t src_task_id;
770
+ ompt_id_t sink_task_id;
771
+ } ompt_record_task_dependence_t;
772
+
773
+ typedef void (*ompt_callback_task_schedule_t) (
774
+ ompt_data_t *prior_task_data,
775
+ ompt_task_status_t prior_task_status,
776
+ ompt_data_t *next_task_data
777
+ );
778
+
779
+ typedef struct ompt_record_task_schedule_t {
780
+ ompt_id_t prior_task_id;
781
+ ompt_task_status_t prior_task_status;
782
+ ompt_id_t next_task_id;
783
+ } ompt_record_task_schedule_t;
784
+
785
+ typedef void (*ompt_callback_implicit_task_t) (
786
+ ompt_scope_endpoint_t endpoint,
787
+ ompt_data_t *parallel_data,
788
+ ompt_data_t *task_data,
789
+ unsigned int actual_parallelism,
790
+ unsigned int index,
791
+ int flags
792
+ );
793
+
794
+ typedef struct ompt_record_implicit_task_t {
795
+ ompt_scope_endpoint_t endpoint;
796
+ ompt_id_t parallel_id;
797
+ ompt_id_t task_id;
798
+ unsigned int actual_parallelism;
799
+ unsigned int index;
800
+ int flags;
801
+ } ompt_record_implicit_task_t;
802
+
803
+ typedef void (*ompt_callback_master_t) (
804
+ ompt_scope_endpoint_t endpoint,
805
+ ompt_data_t *parallel_data,
806
+ ompt_data_t *task_data,
807
+ const void *codeptr_ra
808
+ );
809
+
810
+ typedef struct ompt_record_master_t {
811
+ ompt_scope_endpoint_t endpoint;
812
+ ompt_id_t parallel_id;
813
+ ompt_id_t task_id;
814
+ const void *codeptr_ra;
815
+ } ompt_record_master_t;
816
+
817
+ typedef void (*ompt_callback_sync_region_t) (
818
+ ompt_sync_region_t kind,
819
+ ompt_scope_endpoint_t endpoint,
820
+ ompt_data_t *parallel_data,
821
+ ompt_data_t *task_data,
822
+ const void *codeptr_ra
823
+ );
824
+
825
+ typedef struct ompt_record_sync_region_t {
826
+ ompt_sync_region_t kind;
827
+ ompt_scope_endpoint_t endpoint;
828
+ ompt_id_t parallel_id;
829
+ ompt_id_t task_id;
830
+ const void *codeptr_ra;
831
+ } ompt_record_sync_region_t;
832
+
833
+ typedef void (*ompt_callback_mutex_acquire_t) (
834
+ ompt_mutex_t kind,
835
+ unsigned int hint,
836
+ unsigned int impl,
837
+ ompt_wait_id_t wait_id,
838
+ const void *codeptr_ra
839
+ );
840
+
841
+ typedef struct ompt_record_mutex_acquire_t {
842
+ ompt_mutex_t kind;
843
+ unsigned int hint;
844
+ unsigned int impl;
845
+ ompt_wait_id_t wait_id;
846
+ const void *codeptr_ra;
847
+ } ompt_record_mutex_acquire_t;
848
+
849
+ typedef void (*ompt_callback_mutex_t) (
850
+ ompt_mutex_t kind,
851
+ ompt_wait_id_t wait_id,
852
+ const void *codeptr_ra
853
+ );
854
+
855
+ typedef struct ompt_record_mutex_t {
856
+ ompt_mutex_t kind;
857
+ ompt_wait_id_t wait_id;
858
+ const void *codeptr_ra;
859
+ } ompt_record_mutex_t;
860
+
861
+ typedef void (*ompt_callback_nest_lock_t) (
862
+ ompt_scope_endpoint_t endpoint,
863
+ ompt_wait_id_t wait_id,
864
+ const void *codeptr_ra
865
+ );
866
+
867
+ typedef struct ompt_record_nest_lock_t {
868
+ ompt_scope_endpoint_t endpoint;
869
+ ompt_wait_id_t wait_id;
870
+ const void *codeptr_ra;
871
+ } ompt_record_nest_lock_t;
872
+
873
+ typedef void (*ompt_callback_flush_t) (
874
+ ompt_data_t *thread_data,
875
+ const void *codeptr_ra
876
+ );
877
+
878
+ typedef struct ompt_record_flush_t {
879
+ const void *codeptr_ra;
880
+ } ompt_record_flush_t;
881
+
882
+ typedef void (*ompt_callback_cancel_t) (
883
+ ompt_data_t *task_data,
884
+ int flags,
885
+ const void *codeptr_ra
886
+ );
887
+
888
+ typedef struct ompt_record_cancel_t {
889
+ ompt_id_t task_id;
890
+ int flags;
891
+ const void *codeptr_ra;
892
+ } ompt_record_cancel_t;
893
+
894
+ typedef void (*ompt_callback_device_initialize_t) (
895
+ int device_num,
896
+ const char *type,
897
+ ompt_device_t *device,
898
+ ompt_function_lookup_t lookup,
899
+ const char *documentation
900
+ );
901
+
902
+ typedef void (*ompt_callback_device_finalize_t) (
903
+ int device_num
904
+ );
905
+
906
+ typedef void (*ompt_callback_device_load_t) (
907
+ int device_num,
908
+ const char *filename,
909
+ int64_t offset_in_file,
910
+ void *vma_in_file,
911
+ size_t bytes,
912
+ void *host_addr,
913
+ void *device_addr,
914
+ uint64_t module_id
915
+ );
916
+
917
+ typedef void (*ompt_callback_device_unload_t) (
918
+ int device_num,
919
+ uint64_t module_id
920
+ );
921
+
922
+ typedef void (*ompt_callback_target_data_op_t) (
923
+ ompt_id_t target_id,
924
+ ompt_id_t host_op_id,
925
+ ompt_target_data_op_t optype,
926
+ void *src_addr,
927
+ int src_device_num,
928
+ void *dest_addr,
929
+ int dest_device_num,
930
+ size_t bytes,
931
+ const void *codeptr_ra
932
+ );
933
+
934
+ typedef struct ompt_record_target_data_op_t {
935
+ ompt_id_t host_op_id;
936
+ ompt_target_data_op_t optype;
937
+ void *src_addr;
938
+ int src_device_num;
939
+ void *dest_addr;
940
+ int dest_device_num;
941
+ size_t bytes;
942
+ ompt_device_time_t end_time;
943
+ const void *codeptr_ra;
944
+ } ompt_record_target_data_op_t;
945
+
946
+ typedef void (*ompt_callback_target_t) (
947
+ ompt_target_t kind,
948
+ ompt_scope_endpoint_t endpoint,
949
+ int device_num,
950
+ ompt_data_t *task_data,
951
+ ompt_id_t target_id,
952
+ const void *codeptr_ra
953
+ );
954
+
955
+ typedef struct ompt_record_target_t {
956
+ ompt_target_t kind;
957
+ ompt_scope_endpoint_t endpoint;
958
+ int device_num;
959
+ ompt_id_t task_id;
960
+ ompt_id_t target_id;
961
+ const void *codeptr_ra;
962
+ } ompt_record_target_t;
963
+
964
+ typedef void (*ompt_callback_target_map_t) (
965
+ ompt_id_t target_id,
966
+ unsigned int nitems,
967
+ void **host_addr,
968
+ void **device_addr,
969
+ size_t *bytes,
970
+ unsigned int *mapping_flags,
971
+ const void *codeptr_ra
972
+ );
973
+
974
+ typedef struct ompt_record_target_map_t {
975
+ ompt_id_t target_id;
976
+ unsigned int nitems;
977
+ void **host_addr;
978
+ void **device_addr;
979
+ size_t *bytes;
980
+ unsigned int *mapping_flags;
981
+ const void *codeptr_ra;
982
+ } ompt_record_target_map_t;
983
+
984
+ typedef void (*ompt_callback_target_submit_t) (
985
+ ompt_id_t target_id,
986
+ ompt_id_t host_op_id,
987
+ unsigned int requested_num_teams
988
+ );
989
+
990
+ typedef struct ompt_record_target_kernel_t {
991
+ ompt_id_t host_op_id;
992
+ unsigned int requested_num_teams;
993
+ unsigned int granted_num_teams;
994
+ ompt_device_time_t end_time;
995
+ } ompt_record_target_kernel_t;
996
+
997
+ typedef int (*ompt_callback_control_tool_t) (
998
+ uint64_t command,
999
+ uint64_t modifier,
1000
+ void *arg,
1001
+ const void *codeptr_ra
1002
+ );
1003
+
1004
+ typedef struct ompt_record_control_tool_t {
1005
+ uint64_t command;
1006
+ uint64_t modifier;
1007
+ const void *codeptr_ra;
1008
+ } ompt_record_control_tool_t;
1009
+
1010
+ typedef struct ompd_address_t {
1011
+ ompd_seg_t segment;
1012
+ ompd_addr_t address;
1013
+ } ompd_address_t;
1014
+
1015
+ typedef struct ompd_frame_info_t {
1016
+ ompd_address_t frame_address;
1017
+ ompd_word_t frame_flag;
1018
+ } ompd_frame_info_t;
1019
+
1020
+ typedef struct _ompd_aspace_handle ompd_address_space_handle_t;
1021
+ typedef struct _ompd_thread_handle ompd_thread_handle_t;
1022
+ typedef struct _ompd_parallel_handle ompd_parallel_handle_t;
1023
+ typedef struct _ompd_task_handle ompd_task_handle_t;
1024
+
1025
+ typedef struct _ompd_aspace_cont ompd_address_space_context_t;
1026
+ typedef struct _ompd_thread_cont ompd_thread_context_t;
1027
+
1028
+ typedef struct ompd_device_type_sizes_t {
1029
+ uint8_t sizeof_char;
1030
+ uint8_t sizeof_short;
1031
+ uint8_t sizeof_int;
1032
+ uint8_t sizeof_long;
1033
+ uint8_t sizeof_long_long;
1034
+ uint8_t sizeof_pointer;
1035
+ } ompd_device_type_sizes_t;
1036
+
1037
+ typedef struct ompt_record_ompt_t {
1038
+ ompt_callbacks_t type;
1039
+ ompt_device_time_t time;
1040
+ ompt_id_t thread_id;
1041
+ ompt_id_t target_id;
1042
+ union {
1043
+ ompt_record_thread_begin_t thread_begin;
1044
+ ompt_record_parallel_begin_t parallel_begin;
1045
+ ompt_record_parallel_end_t parallel_end;
1046
+ ompt_record_work_t work;
1047
+ ompt_record_dispatch_t dispatch;
1048
+ ompt_record_task_create_t task_create;
1049
+ ompt_record_dependences_t dependences;
1050
+ ompt_record_task_dependence_t task_dependence;
1051
+ ompt_record_task_schedule_t task_schedule;
1052
+ ompt_record_implicit_task_t implicit_task;
1053
+ ompt_record_master_t master;
1054
+ ompt_record_sync_region_t sync_region;
1055
+ ompt_record_mutex_acquire_t mutex_acquire;
1056
+ ompt_record_mutex_t mutex;
1057
+ ompt_record_nest_lock_t nest_lock;
1058
+ ompt_record_flush_t flush;
1059
+ ompt_record_cancel_t cancel;
1060
+ ompt_record_target_t target;
1061
+ ompt_record_target_data_op_t target_data_op;
1062
+ ompt_record_target_map_t target_map;
1063
+ ompt_record_target_kernel_t target_kernel;
1064
+ ompt_record_control_tool_t control_tool;
1065
+ } record;
1066
+ } ompt_record_ompt_t;
1067
+
1068
+ typedef ompt_record_ompt_t *(*ompt_get_record_ompt_t) (
1069
+ ompt_buffer_t *buffer,
1070
+ ompt_buffer_cursor_t current
1071
+ );
1072
+
1073
+ #define ompt_id_none 0
1074
+ #define ompt_data_none {0}
1075
+ #define ompt_time_none 0
1076
+ #define ompt_hwid_none 0
1077
+ #define ompt_addr_none ~0
1078
+ #define ompt_mutex_impl_none 0
1079
+ #define ompt_wait_id_none 0
1080
+
1081
+ #define ompd_segment_none 0
1082
+
1083
+ #endif /* __OMPT__ */
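The buffered-record types above (ompt_record_ompt_t and the accessor type ompt_get_record_ompt_t) are what a tool sees when it drains a device trace buffer. A minimal sketch of decoding one record follows; it assumes the header above is installed as omp-tools.h and uses ompt_callback_* enumerators from the ompt_callbacks_t enum defined earlier in that header.

#include <stdio.h>
#include <omp-tools.h>  /* assumed install name of the header above */

/* Print a one-line summary of a buffered OMPT record. */
static void print_record(const ompt_record_ompt_t *rec)
{
  if (rec == NULL)
    return;
  switch (rec->type) {
  case ompt_callback_target_data_op:
    printf("data op: %zu bytes, device %d -> device %d\n",
           rec->record.target_data_op.bytes,
           rec->record.target_data_op.src_device_num,
           rec->record.target_data_op.dest_device_num);
    break;
  case ompt_callback_target_submit:
    printf("kernel submit: %u teams requested\n",
           rec->record.target_kernel.requested_num_teams);
    break;
  default:
    printf("record type %d at device time %llu\n",
           (int)rec->type, (unsigned long long)rec->time);
    break;
  }
}

In a buffer-complete callback, a tool would obtain each record through the runtime's record accessor (of type ompt_get_record_ompt_t, declared above) and pass it to a helper like this.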
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes). View file
 
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h ADDED
@@ -0,0 +1,112 @@
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions
6
+ * are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ * contributors may be used to endorse or promote products derived
14
+ * from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ #ifndef __cuda_stdint_h__
30
+ #define __cuda_stdint_h__
31
+
32
+ // Compiler-specific treatment for C99's stdint.h
33
+ //
34
+ // By default, this header will use the standard headers (so it
35
+ // is your responsibility to make sure they are available), except
36
+ // on MSVC before Visual Studio 2010, when they were not provided.
37
+ // To support old MSVC, a few of the commonly-used definitions are
38
+ // provided here. If more definitions are needed, add them here,
39
+ // or replace these definitions with a complete implementation,
40
+ // such as the ones available from Google, Boost, or MSVC10. You
41
+ // can prevent the definition of any of these types (in order to
42
+ // use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED.
43
+
44
+ #if !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
45
+
46
+ // In VS including stdint.h forces the C++ runtime dep - provide an opt-out
47
+ // (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static
48
+ // cudart).
49
+ #if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H))
50
+
51
+ // These definitions can be used with MSVC 8 and 9,
52
+ // which don't ship with stdint.h:
53
+
54
+ typedef unsigned char uint8_t;
55
+
56
+ typedef short int16_t;
57
+ typedef unsigned short uint16_t;
58
+
59
+ // To keep it consistent with all MSVC builds, define those types
60
+ // in the exact same way they are defined with the MSVC headers
61
+ #if defined(_MSC_VER)
62
+ typedef signed char int8_t;
63
+
64
+ typedef int int32_t;
65
+ typedef unsigned int uint32_t;
66
+
67
+ typedef long long int64_t;
68
+ typedef unsigned long long uint64_t;
69
+ #else
70
+ typedef char int8_t;
71
+
72
+ typedef long int32_t;
73
+ typedef unsigned long uint32_t;
74
+
75
+ typedef __int64 int64_t;
76
+ typedef unsigned __int64 uint64_t;
77
+ #endif
78
+
79
+ #elif defined(__DJGPP__)
80
+
81
+ // These definitions can be used when compiling
82
+ // C code with DJGPP, which only provides stdint.h
83
+ // when compiling C++ code with TR1 enabled.
84
+
85
+ typedef char int8_t;
86
+ typedef unsigned char uint8_t;
87
+
88
+ typedef short int16_t;
89
+ typedef unsigned short uint16_t;
90
+
91
+ typedef long int32_t;
92
+ typedef unsigned long uint32_t;
93
+
94
+ typedef long long int64_t;
95
+ typedef unsigned long long uint64_t;
96
+
97
+ #else
98
+
99
+ // Use standard headers, as specified by C99 and C++ TR1.
100
+ // Known to be provided by:
101
+ // - gcc/glibc, supported by all versions of glibc
102
+ // - djgpp, supported since 2001
103
+ // - MSVC, supported by Visual Studio 2010 and later
104
+
105
+ #include <stdint.h>
106
+
107
+ #endif
108
+
109
+ #endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
110
+
111
+
112
+ #endif // file guard
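As the comments above describe, the only tuning knob in this header is CU_STDINT_TYPES_ALREADY_DEFINED. A minimal sketch of that opt-out, assuming a standard <stdint.h> is available:

/* Supply our own fixed-width types and tell cuda_stdint.h to define nothing. */
#define CU_STDINT_TYPES_ALREADY_DEFINED
#include <stdint.h>        /* our source of int8_t .. uint64_t */
#include "cuda_stdint.h"

int main(void)
{
  uint64_t x = 42;         /* comes from <stdint.h>, not cuda_stdint.h */
  return (int)(x - 42);
}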
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti.h ADDED
@@ -0,0 +1,123 @@
1
+ /*
2
+ * Copyright 2010-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_H_)
51
+ #define _CUPTI_H_
52
+
53
+ #ifdef _WIN32
54
+ #ifndef WIN32_LEAN_AND_MEAN
55
+ #define WIN32_LEAN_AND_MEAN
56
+ #endif
57
+ #ifdef NOMINMAX
58
+ #include <windows.h>
59
+ #else
60
+ #define NOMINMAX
61
+ #include <windows.h>
62
+ #undef NOMINMAX
63
+ #endif
64
+ #endif
65
+
66
+ #include <cuda.h>
67
+ #include <cupti_result.h>
68
+ #include <cupti_version.h>
69
+
70
+ /* Activity, callback, event and metric APIs */
71
+ #include <cupti_activity.h>
72
+ #include <cupti_callbacks.h>
73
+ #include <cupti_events.h>
74
+ #include <cupti_metrics.h>
75
+
76
+ /* Runtime, driver, and nvtx function identifiers */
77
+ #include <cupti_driver_cbid.h>
78
+ #include <cupti_runtime_cbid.h>
79
+ #include <cupti_nvtx_cbid.h>
80
+
81
+ /* To support function parameter structures for obsoleted API. See
82
+ cuda.h for the actual definition of these structures. */
83
+ typedef unsigned int CUdeviceptr_v1;
84
+ typedef struct CUDA_MEMCPY2D_v1_st { int dummy; } CUDA_MEMCPY2D_v1;
85
+ typedef struct CUDA_MEMCPY3D_v1_st { int dummy; } CUDA_MEMCPY3D_v1;
86
+ typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY_DESCRIPTOR_v1;
87
+ typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY3D_DESCRIPTOR_v1;
88
+
89
+ /* Function parameter structures */
90
+ #include <generated_cuda_runtime_api_meta.h>
91
+ #include <generated_cuda_meta.h>
92
+
93
+ /* The following parameter structures cannot be included unless a
94
+ header that defines GL_VERSION is included before including them.
95
+ If these are needed then make sure such a header is included
96
+ already. */
97
+ #ifdef GL_VERSION
98
+ #include <generated_cuda_gl_interop_meta.h>
99
+ #include <generated_cudaGL_meta.h>
100
+ #endif
101
+
102
+ //#include <generated_nvtx_meta.h>
103
+
104
+ /* The following parameter structures cannot be included by default as
105
+ they are not guaranteed to be available on all systems. Uncomment
106
+ the includes that are available, or use the include explicitly. */
107
+ #if defined(__linux__)
108
+ //#include <generated_cuda_vdpau_interop_meta.h>
109
+ //#include <generated_cudaVDPAU_meta.h>
110
+ #endif
111
+
112
+ #ifdef _WIN32
113
+ //#include <generated_cuda_d3d9_interop_meta.h>
114
+ //#include <generated_cuda_d3d10_interop_meta.h>
115
+ //#include <generated_cuda_d3d11_interop_meta.h>
116
+ //#include <generated_cudaD3D9_meta.h>
117
+ //#include <generated_cudaD3D10_meta.h>
118
+ //#include <generated_cudaD3D11_meta.h>
119
+ #endif
120
+
121
+ #endif /*_CUPTI_H_*/
122
+
123
+
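The comment block above notes that the GL interop parameter structures are only pulled in when GL_VERSION is already defined. A minimal sketch, assuming a Linux system where <GL/gl.h> provides GL_VERSION:

#include <GL/gl.h>   /* defines GL_VERSION */
#include <cupti.h>   /* now also includes generated_cuda_gl_interop_meta.h
                        and generated_cudaGL_meta.h */

int main(void) { return 0; }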
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h ADDED
@@ -0,0 +1,762 @@
1
+ /*
2
+ * Copyright 2010-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUPTI_CALLBACKS_H__)
51
+ #define __CUPTI_CALLBACKS_H__
52
+
53
+ #include <cuda.h>
54
+ #include <builtin_types.h>
55
+ #include <string.h>
56
+ #include <cuda_stdint.h>
57
+ #include <cupti_result.h>
58
+
59
+ #ifndef CUPTIAPI
60
+ #ifdef _WIN32
61
+ #define CUPTIAPI __stdcall
62
+ #else
63
+ #define CUPTIAPI
64
+ #endif
65
+ #endif
66
+
67
+ #if defined(__cplusplus)
68
+ extern "C" {
69
+ #endif
70
+
71
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
72
+ #pragma GCC visibility push(default)
73
+ #endif
74
+
75
+ /**
76
+ * \defgroup CUPTI_CALLBACK_API CUPTI Callback API
77
+ * Functions, types, and enums that implement the CUPTI Callback API.
78
+ * @{
79
+ */
80
+
81
+ /**
82
+ * \brief Specifies the point in an API call that a callback is issued.
83
+ *
84
+ * Specifies the point in an API call that a callback is issued. This
85
+ * value is communicated to the callback function via \ref
86
+ * CUpti_CallbackData::callbackSite.
87
+ */
88
+ typedef enum {
89
+ /**
90
+ * The callback is at the entry of the API call.
91
+ */
92
+ CUPTI_API_ENTER = 0,
93
+ /**
94
+ * The callback is at the exit of the API call.
95
+ */
96
+ CUPTI_API_EXIT = 1,
97
+ CUPTI_API_CBSITE_FORCE_INT = 0x7fffffff
98
+ } CUpti_ApiCallbackSite;
99
+
100
+ /**
101
+ * \brief Callback domains.
102
+ *
103
+ * Callback domains. Each domain represents callback points for a
104
+ * group of related API functions or CUDA driver activity.
105
+ */
106
+ typedef enum {
107
+ /**
108
+ * Invalid domain.
109
+ */
110
+ CUPTI_CB_DOMAIN_INVALID = 0,
111
+ /**
112
+ * Domain containing callback points for all driver API functions.
113
+ */
114
+ CUPTI_CB_DOMAIN_DRIVER_API = 1,
115
+ /**
116
+ * Domain containing callback points for all runtime API
117
+ * functions.
118
+ */
119
+ CUPTI_CB_DOMAIN_RUNTIME_API = 2,
120
+ /**
121
+ * Domain containing callback points for CUDA resource tracking.
122
+ */
123
+ CUPTI_CB_DOMAIN_RESOURCE = 3,
124
+ /**
125
+ * Domain containing callback points for CUDA synchronization.
126
+ */
127
+ CUPTI_CB_DOMAIN_SYNCHRONIZE = 4,
128
+ /**
129
+ * Domain containing callback points for NVTX API functions.
130
+ */
131
+ CUPTI_CB_DOMAIN_NVTX = 5,
132
+ CUPTI_CB_DOMAIN_SIZE,
133
+
134
+ CUPTI_CB_DOMAIN_FORCE_INT = 0x7fffffff
135
+ } CUpti_CallbackDomain;
136
+
137
+ /**
138
+ * \brief Callback IDs for resource domain.
139
+ *
140
+ * Callback IDs for resource domain, CUPTI_CB_DOMAIN_RESOURCE. This
141
+ * value is communicated to the callback function via the \p cbid
142
+ * parameter.
143
+ */
144
+ typedef enum {
145
+ /**
146
+ * Invalid resource callback ID.
147
+ */
148
+ CUPTI_CBID_RESOURCE_INVALID = 0,
149
+ /**
150
+ * A new context has been created.
151
+ */
152
+ CUPTI_CBID_RESOURCE_CONTEXT_CREATED = 1,
153
+ /**
154
+ * A context is about to be destroyed.
155
+ */
156
+ CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING = 2,
157
+ /**
158
+ * A new stream has been created.
159
+ */
160
+ CUPTI_CBID_RESOURCE_STREAM_CREATED = 3,
161
+ /**
162
+ * A stream is about to be destroyed.
163
+ */
164
+ CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING = 4,
165
+ /**
166
+ * The driver has finished initializing.
167
+ */
168
+ CUPTI_CBID_RESOURCE_CU_INIT_FINISHED = 5,
169
+ /**
170
+ * A module has been loaded.
171
+ */
172
+ CUPTI_CBID_RESOURCE_MODULE_LOADED = 6,
173
+ /**
174
+ * A module is about to be unloaded.
175
+ */
176
+ CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING = 7,
177
+ /**
178
+ * The current module which is being profiled.
179
+ */
180
+ CUPTI_CBID_RESOURCE_MODULE_PROFILED = 8,
181
+ /**
182
+ * CUDA graph has been created.
183
+ */
184
+ CUPTI_CBID_RESOURCE_GRAPH_CREATED = 9,
185
+ /**
186
+ * CUDA graph is about to be destroyed.
187
+ */
188
+ CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING = 10,
189
+ /**
190
+ * CUDA graph is cloned.
191
+ */
192
+ CUPTI_CBID_RESOURCE_GRAPH_CLONED = 11,
193
+ /**
194
+ * CUDA graph node is about to be created
195
+ */
196
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING = 12,
197
+ /**
198
+ * CUDA graph node is created.
199
+ */
200
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED = 13,
201
+ /**
202
+ * CUDA graph node is about to be destroyed.
203
+ */
204
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING = 14,
205
+ /**
206
+ * Dependency on a CUDA graph node is created.
207
+ */
208
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED = 15,
209
+ /**
210
+ * Dependency on a CUDA graph node is destroyed.
211
+ */
212
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING = 16,
213
+ /**
214
+ * An executable CUDA graph is about to be created.
215
+ */
216
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING = 17,
217
+ /**
218
+ * An executable CUDA graph is created.
219
+ */
220
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED = 18,
221
+ /**
222
+ * An executable CUDA graph is about to be destroyed.
223
+ */
224
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING = 19,
225
+ /**
226
+ * CUDA graph node is cloned.
227
+ */
228
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED = 20,
229
+
230
+ CUPTI_CBID_RESOURCE_SIZE,
231
+ CUPTI_CBID_RESOURCE_FORCE_INT = 0x7fffffff
232
+ } CUpti_CallbackIdResource;
233
+
234
+ /**
235
+ * \brief Callback IDs for synchronization domain.
236
+ *
237
+ * Callback IDs for synchronization domain,
238
+ * CUPTI_CB_DOMAIN_SYNCHRONIZE. This value is communicated to the
239
+ * callback function via the \p cbid parameter.
240
+ */
241
+ typedef enum {
242
+ /**
243
+ * Invalid synchronize callback ID.
244
+ */
245
+ CUPTI_CBID_SYNCHRONIZE_INVALID = 0,
246
+ /**
247
+ * Stream synchronization has completed for the stream.
248
+ */
249
+ CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED = 1,
250
+ /**
251
+ * Context synchronization has completed for the context.
252
+ */
253
+ CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED = 2,
254
+ CUPTI_CBID_SYNCHRONIZE_SIZE,
255
+ CUPTI_CBID_SYNCHRONIZE_FORCE_INT = 0x7fffffff
256
+ } CUpti_CallbackIdSync;
257
+
258
+
259
+ /**
260
+ * \brief Data passed into a runtime or driver API callback function.
261
+ *
262
+ * Data passed into a runtime or driver API callback function as the
263
+ * \p cbdata argument to \ref CUpti_CallbackFunc. The \p cbdata will
264
+ * be this type for \p domain equal to CUPTI_CB_DOMAIN_DRIVER_API or
265
+ * CUPTI_CB_DOMAIN_RUNTIME_API. The callback data is valid only within
266
+ * the invocation of the callback function that is passed the data. If
267
+ * you need to retain some data for use outside of the callback, you
268
+ * must make a copy of that data. For example, if you make a shallow
269
+ * copy of CUpti_CallbackData within a callback, you cannot
270
+ * dereference \p functionParams outside of that callback to access
271
+ * the function parameters. \p functionName is an exception: the
272
+ * string pointed to by \p functionName is a global constant and so
273
+ * may be accessed outside of the callback.
274
+ */
275
+ typedef struct {
276
+ /**
277
+ * Point in the runtime or driver function from where the callback
278
+ * was issued.
279
+ */
280
+ CUpti_ApiCallbackSite callbackSite;
281
+
282
+ /**
283
+ * Name of the runtime or driver API function which issued the
284
+ * callback. This string is a global constant and so may be
285
+ * accessed outside of the callback.
286
+ */
287
+ const char *functionName;
288
+
289
+ /**
290
+ * Pointer to the arguments passed to the runtime or driver API
291
+ * call. See generated_cuda_runtime_api_meta.h and
292
+ * generated_cuda_meta.h for structure definitions for the
293
+ * parameters for each runtime and driver API function.
294
+ */
295
+ const void *functionParams;
296
+
297
+ /**
298
+ * Pointer to the return value of the runtime or driver API
299
+ * call. This field is only valid within the exit::CUPTI_API_EXIT
300
+ * callback. For a runtime API \p functionReturnValue points to a
301
+ * \p cudaError_t. For a driver API \p functionReturnValue points
302
+ * to a \p CUresult.
303
+ */
304
+ void *functionReturnValue;
305
+
306
+ /**
307
+ * Name of the symbol operated on by the runtime or driver API
308
+ * function which issued the callback. This entry is valid only for
309
+ * driver and runtime launch callbacks, where it returns the name of
310
+ * the kernel.
311
+ */
312
+ const char *symbolName;
313
+
314
+ /**
315
+ * Driver context current to the thread, or null if no context is
316
+ * current. This value can change from the entry to exit callback
317
+ * of a runtime API function if the runtime initializes a context.
318
+ */
319
+ CUcontext context;
320
+
321
+ /**
322
+ * Unique ID for the CUDA context associated with the thread. The
323
+ * UIDs are assigned sequentially as contexts are created and are
324
+ * unique within a process.
325
+ */
326
+ uint32_t contextUid;
327
+
328
+ /**
329
+ * Pointer to data shared between the entry and exit callbacks of
330
+ * a given runtime or driver API function invocation. This field
331
+ * can be used to pass 64-bit values from the entry callback to
332
+ * the corresponding exit callback.
333
+ */
334
+ uint64_t *correlationData;
335
+
336
+ /**
337
+ * The activity record correlation ID for this callback. For a
338
+ * driver domain callback (i.e. \p domain
339
+ * CUPTI_CB_DOMAIN_DRIVER_API) this ID will equal the correlation ID
340
+ * in the CUpti_ActivityAPI record corresponding to the CUDA driver
341
+ * function call. For a runtime domain callback (i.e. \p domain
342
+ * CUPTI_CB_DOMAIN_RUNTIME_API) this ID will equal the correlation
343
+ * ID in the CUpti_ActivityAPI record corresponding to the CUDA
344
+ * runtime function call. Within the callback, this ID can be
345
+ * recorded to correlate user data with the activity record. This
346
+ * field is new in 4.1.
347
+ */
348
+ uint32_t correlationId;
349
+
350
+ } CUpti_CallbackData;
351
+
352
+ /**
353
+ * \brief Data passed into a resource callback function.
354
+ *
355
+ * Data passed into a resource callback function as the \p cbdata
356
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
357
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The callback
358
+ * data is valid only within the invocation of the callback function
359
+ * that is passed the data. If you need to retain some data for use
360
+ * outside of the callback, you must make a copy of that data.
361
+ */
362
+ typedef struct {
363
+ /**
364
+ * For CUPTI_CBID_RESOURCE_CONTEXT_CREATED and
365
+ * CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING, the context being
366
+ * created or destroyed. For CUPTI_CBID_RESOURCE_STREAM_CREATED and
367
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the context
368
+ * containing the stream being created or destroyed.
369
+ */
370
+ CUcontext context;
371
+
372
+ union {
373
+ /**
374
+ * For CUPTI_CBID_RESOURCE_STREAM_CREATED and
375
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the stream being
376
+ * created or destroyed.
377
+ */
378
+ CUstream stream;
379
+ } resourceHandle;
380
+
381
+ /**
382
+ * Reserved for future use.
383
+ */
384
+ void *resourceDescriptor;
385
+ } CUpti_ResourceData;
386
+
387
+
388
+ /**
389
+ * \brief Module data passed into a resource callback function.
390
+ *
391
+ * CUDA module data passed into a resource callback function as the \p cbdata
392
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
393
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The module
394
+ * data is valid only within the invocation of the callback function
395
+ * that is passed the data. If you need to retain some data for use
396
+ * outside of the callback, you must make a copy of that data.
397
+ */
398
+
399
+ typedef struct {
400
+ /**
401
+ * Identifier to associate with the CUDA module.
402
+ */
403
+ uint32_t moduleId;
404
+
405
+ /**
406
+ * The size of the cubin.
407
+ */
408
+ size_t cubinSize;
409
+
410
+ /**
411
+ * Pointer to the associated cubin.
412
+ */
413
+ const char *pCubin;
414
+ } CUpti_ModuleResourceData;
415
+
416
+ /**
417
+ * \brief CUDA graphs data passed into a resource callback function.
418
+ *
419
+ * CUDA graphs data passed into a resource callback function as the \p cbdata
420
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
421
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The graph
422
+ * data is valid only within the invocation of the callback function
423
+ * that is passed the data. If you need to retain some data for use
424
+ * outside of the callback, you must make a copy of that data.
425
+ */
426
+
427
+ typedef struct {
428
+ /**
429
+ * CUDA graph
430
+ */
431
+ CUgraph graph;
432
+ /**
433
+ * The original CUDA graph from which \param graph is cloned
434
+ */
435
+ CUgraph originalGraph;
436
+ /**
437
+ * CUDA graph node
438
+ */
439
+ CUgraphNode node;
440
+ /**
441
+ * The original CUDA graph node from which \param node is cloned
442
+ */
443
+ CUgraphNode originalNode;
444
+ /**
445
+ * Type of the \param node
446
+ */
447
+ CUgraphNodeType nodeType;
448
+ /**
449
+ * The dependent graph node
450
+ * The size of the array is \param numDependencies.
451
+ */
452
+ CUgraphNode dependency;
453
+ /**
454
+ * CUDA executable graph
455
+ */
456
+ CUgraphExec graphExec;
457
+ } CUpti_GraphData;
458
+
459
+ /**
460
+ * \brief Data passed into a synchronize callback function.
461
+ *
462
+ * Data passed into a synchronize callback function as the \p cbdata
463
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
464
+ * type for \p domain equal to CUPTI_CB_DOMAIN_SYNCHRONIZE. The
465
+ * callback data is valid only within the invocation of the callback
466
+ * function that is passed the data. If you need to retain some data
467
+ * for use outside of the callback, you must make a copy of that data.
468
+ */
469
+ typedef struct {
470
+ /**
471
+ * The context of the stream being synchronized.
472
+ */
473
+ CUcontext context;
474
+ /**
475
+ * The stream being synchronized.
476
+ */
477
+ CUstream stream;
478
+ } CUpti_SynchronizeData;
479
+
480
+ /**
481
+ * \brief Data passed into a NVTX callback function.
482
+ *
483
+ * Data passed into a NVTX callback function as the \p cbdata argument
484
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
485
+ * domain equal to CUPTI_CB_DOMAIN_NVTX. Unless otherwise noted, the
486
+ * callback data is valid only within the invocation of the callback
487
+ * function that is passed the data. If you need to retain some data
488
+ * for use outside of the callback, you must make a copy of that data.
489
+ */
490
+ typedef struct {
491
+ /**
492
+ * Name of the NVTX API function which issued the callback. This
493
+ * string is a global constant and so may be accessed outside of the
494
+ * callback.
495
+ */
496
+ const char *functionName;
497
+
498
+ /**
499
+ * Pointer to the arguments passed to the NVTX API call. See
500
+ * generated_nvtx_meta.h for structure definitions for the
501
+ * parameters for each NVTX API function.
502
+ */
503
+ const void *functionParams;
504
+
505
+ /**
506
+ * Pointer to the return value of the NVTX API call. See
507
+ * nvToolsExt.h for each NVTX API function's return value.
508
+ */
509
+ const void *functionReturnValue;
510
+ } CUpti_NvtxData;
511
+
512
+ /**
513
+ * \brief An ID for a driver API, runtime API, resource or
514
+ * synchronization callback.
515
+ *
516
+ * An ID for a driver API, runtime API, resource or synchronization
517
+ * callback. Within a driver API callback this should be interpreted
518
+ * as a CUpti_driver_api_trace_cbid value (these values are defined in
519
+ * cupti_driver_cbid.h). Within a runtime API callback this should be
520
+ * interpreted as a CUpti_runtime_api_trace_cbid value (these values
521
+ * are defined in cupti_runtime_cbid.h). Within a resource API
522
+ * callback this should be interpreted as a \ref
523
+ * CUpti_CallbackIdResource value. Within a synchronize API callback
524
+ * this should be interpreted as a \ref CUpti_CallbackIdSync value.
525
+ */
526
+ typedef uint32_t CUpti_CallbackId;
527
+
528
+ /**
529
+ * \brief Function type for a callback.
530
+ *
531
+ * Function type for a callback. The type of the data passed to the
532
+ * callback in \p cbdata depends on the \p domain. If \p domain is
533
+ * CUPTI_CB_DOMAIN_DRIVER_API or CUPTI_CB_DOMAIN_RUNTIME_API the type
534
+ * of \p cbdata will be CUpti_CallbackData. If \p domain is
535
+ * CUPTI_CB_DOMAIN_RESOURCE the type of \p cbdata will be
536
+ * CUpti_ResourceData. If \p domain is CUPTI_CB_DOMAIN_SYNCHRONIZE the
537
+ * type of \p cbdata will be CUpti_SynchronizeData. If \p domain is
538
+ * CUPTI_CB_DOMAIN_NVTX the type of \p cbdata will be CUpti_NvtxData.
539
+ *
540
+ * \param userdata User data supplied at subscription of the callback
541
+ * \param domain The domain of the callback
542
+ * \param cbid The ID of the callback
543
+ * \param cbdata Data passed to the callback.
544
+ */
545
+ typedef void (CUPTIAPI *CUpti_CallbackFunc)(
546
+ void *userdata,
547
+ CUpti_CallbackDomain domain,
548
+ CUpti_CallbackId cbid,
549
+ const void *cbdata);
550
+
551
+ /**
552
+ * \brief A callback subscriber.
553
+ */
554
+ typedef struct CUpti_Subscriber_st *CUpti_SubscriberHandle;
555
+
556
+ /**
557
+ * \brief Pointer to an array of callback domains.
558
+ */
559
+ typedef CUpti_CallbackDomain *CUpti_DomainTable;
560
+
561
+ /**
562
+ * \brief Get the available callback domains.
563
+ *
564
+ * Returns in \p *domainTable an array of size \p *domainCount of all
565
+ * the available callback domains.
566
+ * \note \b Thread-safety: this function is thread safe.
567
+ *
568
+ * \param domainCount Returns number of callback domains
569
+ * \param domainTable Returns pointer to array of available callback domains
570
+ *
571
+ * \retval CUPTI_SUCCESS on success
572
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
573
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p domainCount or \p domainTable are NULL
574
+ */
575
+ CUptiResult CUPTIAPI cuptiSupportedDomains(size_t *domainCount,
576
+ CUpti_DomainTable *domainTable);
577
+
578
+ /**
579
+ * \brief Initialize a callback subscriber with a callback function
580
+ * and user data.
581
+ *
582
+ * Initializes a callback subscriber with a callback function and
583
+ * (optionally) a pointer to user data. The returned subscriber handle
584
+ * can be used to enable and disable the callback for specific domains
585
+ * and callback IDs.
586
+ * \note Only a single subscriber can be registered at a time. To ensure
587
+ * that no other CUPTI client interrupts the profiling session, it's the
588
+ * responsibility of all the CUPTI clients to call this function before
589
+ * starting the profling session. In case profiling session is already
590
+ * started by another CUPTI client, this function returns the error code
591
+ * CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED.
592
+ * Note that this function returns the same error when application is
593
+ * launched using NVIDIA tools like nvprof, Visual Profiler, Nsight Systems,
594
+ * Nsight Compute, cuda-gdb and cuda-memcheck.
595
+ * \note This function does not enable any callbacks.
596
+ * \note \b Thread-safety: this function is thread safe.
597
+ *
598
+ * \param subscriber Returns handle to initialize subscriber
599
+ * \param callback The callback function
600
+ * \param userdata A pointer to user data. This data will be passed to
601
+ * the callback function via the \p userdata parameter.
602
+ *
603
+ * \retval CUPTI_SUCCESS on success
604
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
605
+ * \retval CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED if there is already a CUPTI subscriber
606
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL
607
+ */
608
+ CUptiResult CUPTIAPI cuptiSubscribe(CUpti_SubscriberHandle *subscriber,
609
+ CUpti_CallbackFunc callback,
610
+ void *userdata);
611
+
612
+ /**
613
+ * \brief Unregister a callback subscriber.
614
+ *
615
+ * Removes a callback subscriber so that no future callbacks will be
616
+ * issued to that subscriber.
617
+ * \note \b Thread-safety: this function is thread safe.
618
+ *
619
+ * \param subscriber Handle to the initialized subscriber
620
+ *
621
+ * \retval CUPTI_SUCCESS on success
622
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
623
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL or not initialized
624
+ */
625
+ CUptiResult CUPTIAPI cuptiUnsubscribe(CUpti_SubscriberHandle subscriber);
626
+
627
+ /**
628
+ * \brief Get the current enabled/disabled state of a callback for a specific
629
+ * domain and function ID.
630
+ *
631
+ * Returns non-zero in \p *enable if the callback for a domain and
632
+ * callback ID is enabled, and zero if not enabled.
633
+ *
634
+ * \note \b Thread-safety: a subscriber must serialize access to
635
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
636
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
637
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
638
+ * the results are undefined.
639
+ *
640
+ * \param enable Returns non-zero if callback enabled, zero if not enabled
641
+ * \param subscriber Handle to the initialized subscriber
642
+ * \param domain The domain of the callback
643
+ * \param cbid The ID of the callback
644
+ *
645
+ * \retval CUPTI_SUCCESS on success
646
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
647
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p enabled is NULL, or if \p
648
+ * subscriber, \p domain or \p cbid is invalid.
649
+ */
650
+ CUptiResult CUPTIAPI cuptiGetCallbackState(uint32_t *enable,
651
+ CUpti_SubscriberHandle subscriber,
652
+ CUpti_CallbackDomain domain,
653
+ CUpti_CallbackId cbid);
654
+
655
+ /**
656
+ * \brief Enable or disable callbacks for a specific domain and
657
+ * callback ID.
658
+ *
659
+ * Enable or disable callbacks for a subscriber for a specific domain
660
+ * and callback ID.
661
+ *
662
+ * \note \b Thread-safety: a subscriber must serialize access to
663
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
664
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
665
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
666
+ * the results are undefined.
667
+ *
668
+ * \param enable New enable state for the callback. Zero disables the
669
+ * callback, non-zero enables the callback.
670
+ * \param subscriber - Handle to callback subscription
671
+ * \param domain The domain of the callback
672
+ * \param cbid The ID of the callback
673
+ *
674
+ * \retval CUPTI_SUCCESS on success
675
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
676
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber, \p domain or \p
677
+ * cbid is invalid.
678
+ */
679
+ CUptiResult CUPTIAPI cuptiEnableCallback(uint32_t enable,
680
+ CUpti_SubscriberHandle subscriber,
681
+ CUpti_CallbackDomain domain,
682
+ CUpti_CallbackId cbid);
683
+
684
+ /**
685
+ * \brief Enable or disable all callbacks for a specific domain.
686
+ *
687
+ * Enable or disable all callbacks for a specific domain.
688
+ *
689
+ * \note \b Thread-safety: a subscriber must serialize access to
690
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
691
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
692
+ * d, *) and cuptiEnableDomain(sub, d) are called concurrently, the
693
+ * results are undefined.
694
+ *
695
+ * \param enable New enable state for all callbacks in the
696
+ * domain. Zero disables all callbacks, non-zero enables all
697
+ * callbacks.
698
+ * \param subscriber - Handle to callback subscription
699
+ * \param domain The domain of the callback
700
+ *
701
+ * \retval CUPTI_SUCCESS on success
702
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
703
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber or \p domain is invalid
704
+ */
705
+ CUptiResult CUPTIAPI cuptiEnableDomain(uint32_t enable,
706
+ CUpti_SubscriberHandle subscriber,
707
+ CUpti_CallbackDomain domain);
708
+
709
+ /**
710
+ * \brief Enable or disable all callbacks in all domains.
711
+ *
712
+ * Enable or disable all callbacks in all domains.
713
+ *
714
+ * \note \b Thread-safety: a subscriber must serialize access to
715
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
716
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
717
+ * d, *) and cuptiEnableAllDomains(sub) are called concurrently, the
718
+ * results are undefined.
719
+ *
720
+ * \param enable New enable state for all callbacks in all
721
+ * domain. Zero disables all callbacks, non-zero enables all
722
+ * callbacks.
723
+ * \param subscriber - Handle to callback subscription
724
+ *
725
+ * \retval CUPTI_SUCCESS on success
726
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
727
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is invalid
728
+ */
729
+ CUptiResult CUPTIAPI cuptiEnableAllDomains(uint32_t enable,
730
+ CUpti_SubscriberHandle subscriber);
731
+
732
+ /**
733
+ * \brief Get the name of a callback for a specific domain and callback ID.
734
+ *
735
+ * Returns a pointer to the name C string in \p **name.
736
+ *
737
+ * \note \b Names are available only for the DRIVER and RUNTIME domains.
738
+ *
739
+ * \param domain The domain of the callback
740
+ * \param cbid The ID of the callback
741
+ * \param name Returns pointer to the name string on success, NULL otherwise
742
+ *
743
+ * \retval CUPTI_SUCCESS on success
744
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p name is NULL, or if
745
+ * \p domain or \p cbid is invalid.
746
+ */
747
+ CUptiResult CUPTIAPI cuptiGetCallbackName(CUpti_CallbackDomain domain,
748
+ uint32_t cbid,
749
+ const char **name);
750
+
751
+ /** @} */ /* END CUPTI_CALLBACK_API */
752
+
753
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
754
+ #pragma GCC visibility pop
755
+ #endif
756
+
757
+ #if defined(__cplusplus)
758
+ }
759
+ #endif
760
+
761
+ #endif // file guard
762
+
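Taken together, the declarations above describe a small workflow: subscribe once, enable a domain (or individual callback IDs), and inspect CUpti_CallbackData inside the callback. A minimal sketch follows; cuptiGetResultString comes from cupti_result.h, and error handling is reduced to a macro for brevity.

#include <stdio.h>
#include <cuda.h>
#include <cupti.h>

#define CHECK_CUPTI(call)                                   \
  do {                                                      \
    CUptiResult status_ = (call);                           \
    if (status_ != CUPTI_SUCCESS) {                         \
      const char *err_;                                     \
      cuptiGetResultString(status_, &err_);                 \
      fprintf(stderr, "CUPTI error: %s\n", err_);           \
    }                                                       \
  } while (0)

/* Print every driver/runtime API name at API exit, with its correlation ID. */
static void CUPTIAPI
api_callback(void *userdata, CUpti_CallbackDomain domain,
             CUpti_CallbackId cbid, const void *cbdata)
{
  (void)userdata; (void)cbid;
  if (domain == CUPTI_CB_DOMAIN_DRIVER_API ||
      domain == CUPTI_CB_DOMAIN_RUNTIME_API) {
    const CUpti_CallbackData *cb = (const CUpti_CallbackData *)cbdata;
    if (cb->callbackSite == CUPTI_API_EXIT)
      printf("%s (correlation id %u)\n", cb->functionName, cb->correlationId);
  }
}

int main(void)
{
  CUpti_SubscriberHandle subscriber;
  CHECK_CUPTI(cuptiSubscribe(&subscriber, api_callback, NULL));
  CHECK_CUPTI(cuptiEnableDomain(1, subscriber, CUPTI_CB_DOMAIN_DRIVER_API));

  cuInit(0);   /* any driver API call now triggers api_callback at entry/exit */

  CHECK_CUPTI(cuptiUnsubscribe(subscriber));
  return 0;
}

Because only one subscriber may exist per process (see cuptiSubscribe above), tools typically perform this setup as early as possible.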
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h ADDED
@@ -0,0 +1,127 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <cupti_result.h>
5
+
6
+ #include <stddef.h>
7
+ #include <stdint.h>
8
+
9
+ namespace NV { namespace Cupti { namespace Checkpoint {
10
+
11
+ #ifdef __cplusplus
12
+ extern "C"
13
+ {
14
+ #endif
15
+
16
+ /**
17
+ * \defgroup CUPTI_CHECKPOINT_API CUPTI Checkpoint API
18
+ * Functions, types, and enums that implement the CUPTI Checkpoint API.
19
+ * @{
20
+ */
21
+
22
+ /**
23
+ * \brief Specifies optimization options for a checkpoint, may be OR'd together to specify multiple options.
24
+ */
25
+ typedef enum
26
+ {
27
+ CUPTI_CHECKPOINT_OPT_NONE = 0, //!< Default behavior
28
+ CUPTI_CHECKPOINT_OPT_TRANSFER = 1, //!< Determine which mem blocks have changed, and only restore those. This optimization is cached, which means cuptiCheckpointRestore must always be called at the same point in the application when this option is enabled, or the result may be incorrect.
29
+ } CUpti_CheckpointOptimizations;
30
+
31
+ /**
32
+ * \brief Configuration and handle for a CUPTI Checkpoint
33
+ *
34
+ * A CUptiCheckpoint object should be initialized with desired options prior to passing into any
35
+ * CUPTI Checkpoint API function. The first call into a Checkpoint API function will initialize internal
36
+ * state based on these options. Subsequent changes to these options will not have any effect.
37
+ *
38
+ * Checkpoint data is saved in device, host, and filesystem space. There are options to reserve memory
39
+ * at each level (device, host, filesystem) which are intended to allow a guarantee that a certain amount
40
+ * of memory will remain free for use after the checkpoint is saved.
41
+ * Note, however, that falling back to slower levels of memory (host, and then filesystem) to save the checkpoint
42
+ * will result in performance degradation.
43
+ * Currently, the filesystem limitation is not implemented. Note that falling back to filesystem storage may
44
+ * significantly impact the performance for saving and restoring a checkpoint.
45
+ */
46
+ typedef struct
47
+ {
48
+ size_t structSize; //!< [in] Must be set to CUpti_Checkpoint_STRUCT_SIZE
49
+
50
+ CUcontext ctx; //!< [in] Set to context to save from, or will use current context if NULL
51
+
52
+ size_t reserveDeviceMB; //!< [in] Restrict checkpoint from using last N MB of device memory (-1 = use no device memory)
53
+ size_t reserveHostMB; //!< [in] Restrict checkpoint from using last N MB of host memory (-1 = use no host memory)
54
+ uint8_t allowOverwrite; //!< [in] Boolean, Allow checkpoint to save over existing checkpoint
55
+ uint8_t optimizations; //!< [in] Mask of CUpti_CheckpointOptimizations flags for this checkpoint
56
+
57
+ void * pPriv; //!< [in] Assign to NULL
58
+ } CUpti_Checkpoint;
59
+
60
+ #define CUpti_Checkpoint_STRUCT_SIZE \
61
+ (offsetof(CUpti_Checkpoint, pPriv) + \
62
+ sizeof(((CUpti_Checkpoint*)(nullptr))->pPriv))
63
+
64
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
65
+ #pragma GCC visibility push(default)
66
+ #endif
67
+
68
+ /**
69
+ * \brief Initialize and save a checkpoint of the device state associated with the handle context
70
+ *
71
+ * Uses the handle options to configure and save a checkpoint of the device state associated with the specified context.
72
+ *
73
+ * \param handle A pointer to a CUpti_Checkpoint object
74
+ *
75
+ * \retval CUPTI_SUCCESS if a checkpoint was successfully initialized and saved
76
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p handle does not appear to refer to a valid CUpti_Checkpoint
77
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
78
+ * \retval CUPTI_ERROR_INVALID_DEVICE if device associated with context is not compatible with checkpoint API
79
+ * \retval CUPTI_ERROR_INVALID_OPERATION if Save is requested over an existing checkpoint, but \p allowOverwrite was not originally specified
80
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if as configured, not enough backing storage space to save the checkpoint
81
+ */
82
+ CUptiResult cuptiCheckpointSave(CUpti_Checkpoint * const handle);
83
+
84
+ /**
85
+ * \brief Restore a checkpoint to the device associated with its context
86
+ *
87
+ * Restores device, pinned, and allocated memory to the state when the checkpoint was saved
88
+ *
89
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
90
+ *
91
+ * \retval CUPTI_SUCCESS if the checkpoint was successfully restored
92
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if the checkpoint was not previously initialized
93
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
94
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle appears invalid
95
+ * \retval CUPTI_ERROR_UNKNOWN if the restore or optimization operation fails
96
+ */
97
+ CUptiResult cuptiCheckpointRestore(CUpti_Checkpoint * const handle);
98
+
99
+ /**
100
+ * \brief Free the backing data for a checkpoint
101
+ *
102
+ * Frees all associated device, host memory and filesystem storage used for this context.
103
+ * After freeing a handle, it may be re-used as if it was new - options may be re-configured and will
104
+ * take effect on the next call to \p cuptiCheckpointSave.
105
+ *
106
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
107
+ *
108
+ * \retval CUPTI_SUCCESS if the handle was successfully freed
109
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle was already freed or appears invalid
110
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if the context is no longer valid
111
+ */
112
+ CUptiResult cuptiCheckpointFree(CUpti_Checkpoint * const handle);
113
+
114
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
115
+ #pragma GCC visibility pop
116
+ #endif
117
+
118
+ /**
119
+ * @}
120
+ */
121
+
122
+ #ifdef __cplusplus
123
+ }
124
+ #endif
125
+
126
+ // Exit namespace NV::Cupti::Checkpoint
127
+ }}}
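The three entry points above form a simple save/run/restore/free cycle. A minimal sketch, assuming the caller already holds a valid CUcontext and that the work between save and restore is a placeholder:

#include <cuda.h>
#include <cupti_checkpoint.h>

using namespace NV::Cupti::Checkpoint;

void run_twice_from_same_state(CUcontext ctx)
{
  CUpti_Checkpoint cp = {};                 // zero-init also sets pPriv to NULL
  cp.structSize = CUpti_Checkpoint_STRUCT_SIZE;
  cp.ctx = ctx;                             // or NULL to use the current context
  cp.allowOverwrite = 1;
  cp.optimizations = CUPTI_CHECKPOINT_OPT_NONE;

  if (cuptiCheckpointSave(&cp) != CUPTI_SUCCESS)
    return;

  // ... launch work that mutates device memory (placeholder) ...

  cuptiCheckpointRestore(&cp);              // device state back to the saved point

  // ... launch the same work again from an identical initial state ...

  cuptiCheckpointFree(&cp);                 // release device/host/file backing storage
}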
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h ADDED
@@ -0,0 +1,725 @@
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ typedef enum CUpti_driver_api_trace_cbid_enum {
10
+ CUPTI_DRIVER_TRACE_CBID_INVALID = 0,
11
+ CUPTI_DRIVER_TRACE_CBID_cuInit = 1,
12
+ CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = 2,
13
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = 3,
14
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = 4,
15
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = 5,
16
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = 6,
17
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = 7,
18
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = 8,
19
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = 9,
20
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = 10,
21
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = 11,
22
+ CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = 12,
23
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = 13,
24
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = 14,
25
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = 15,
26
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = 16,
27
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = 17,
28
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = 18,
29
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = 19,
30
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = 20,
31
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = 21,
32
+ CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = 22,
33
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = 23,
34
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = 24,
35
+ CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = 25,
36
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = 26,
37
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = 27,
38
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = 28,
39
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = 29,
40
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = 30,
41
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = 31,
42
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = 32,
43
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree = 33,
44
+ CUPTI_DRIVER_TRACE_CBID_cu64MemFree = 34,
45
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = 35,
46
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = 36,
47
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = 37,
48
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = 38,
49
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = 39,
50
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = 40,
51
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = 41,
52
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = 42,
53
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = 43,
54
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = 44,
55
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = 45,
56
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = 46,
57
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = 47,
58
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = 48,
59
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = 49,
60
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = 50,
61
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = 51,
62
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = 52,
63
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = 53,
64
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = 54,
65
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = 55,
66
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = 56,
67
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = 57,
68
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = 58,
69
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = 59,
70
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = 60,
71
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = 61,
72
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = 62,
73
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = 63,
74
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = 64,
75
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = 65,
76
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = 66,
77
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = 67,
78
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = 68,
79
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = 69,
80
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = 70,
81
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = 71,
82
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = 72,
83
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = 73,
84
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = 74,
85
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = 75,
86
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = 76,
87
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = 77,
88
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = 78,
89
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = 79,
90
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = 80,
91
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = 81,
92
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = 82,
93
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = 83,
94
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = 84,
95
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = 85,
96
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = 86,
97
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = 87,
98
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = 88,
99
+ CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = 89,
100
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = 90,
101
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = 91,
102
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = 92,
103
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = 93,
104
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = 94,
105
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = 95,
106
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = 96,
107
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = 97,
108
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = 98,
109
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = 99,
110
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = 100,
111
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = 101,
112
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = 102,
113
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = 103,
114
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = 104,
115
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = 105,
116
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = 106,
117
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = 107,
118
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = 108,
119
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = 109,
120
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = 110,
121
+ CUPTI_DRIVER_TRACE_CBID_cuParamSeti = 111,
122
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetf = 112,
123
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetv = 113,
124
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = 114,
125
+ CUPTI_DRIVER_TRACE_CBID_cuLaunch = 115,
126
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = 116,
127
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = 117,
128
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreate = 118,
129
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord = 119,
130
+ CUPTI_DRIVER_TRACE_CBID_cuEventQuery = 120,
131
+ CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = 121,
132
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = 122,
133
+ CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = 123,
134
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = 124,
135
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = 125,
136
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = 126,
137
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = 127,
138
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = 128,
139
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = 129,
140
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = 130,
141
+ CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = 131,
142
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = 132,
143
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = 133,
144
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = 134,
145
+ CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = 135,
146
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = 136,
147
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = 137,
148
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = 138,
149
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = 139,
150
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = 140,
151
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = 141,
152
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = 142,
153
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = 143,
154
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = 144,
155
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = 145,
156
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = 146,
157
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = 147,
158
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = 148,
159
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = 149,
160
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = 150,
161
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = 151,
162
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = 152,
163
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = 153,
164
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = 154,
165
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = 155,
166
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = 156,
167
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = 157,
168
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = 158,
169
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = 159,
170
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = 160,
171
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = 161,
172
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = 162,
173
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = 163,
174
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = 164,
175
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = 165,
176
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = 166,
177
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = 167,
178
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = 168,
179
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9End = 169,
180
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = 170,
181
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = 171,
182
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = 172,
183
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = 173,
184
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = 174,
185
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = 175,
186
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = 176,
187
+ CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = 177,
188
+ CUPTI_DRIVER_TRACE_CBID_cuGLInit = 178,
189
+ CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = 179,
190
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = 180,
191
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = 181,
192
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = 182,
193
+ CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = 183,
194
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = 184,
195
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = 185,
196
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = 186,
197
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = 187,
198
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = 188,
199
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = 189,
200
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = 190,
201
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = 191,
202
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = 192,
203
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = 193,
204
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = 194,
205
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = 195,
206
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = 196,
207
+ CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = 197,
208
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = 198,
209
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = 199,
210
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = 200,
211
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = 201,
212
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = 202,
213
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = 203,
214
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = 204,
215
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = 205,
216
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = 206,
217
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = 207,
218
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = 208,
219
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = 209,
220
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = 210,
221
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = 211,
222
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = 212,
223
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = 213,
224
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = 214,
225
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = 215,
226
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = 216,
227
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = 217,
228
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = 218,
229
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = 219,
230
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = 220,
231
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = 221,
232
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = 222,
233
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = 223,
234
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = 224,
235
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = 225,
236
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = 226,
237
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = 227,
238
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = 228,
239
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = 229,
240
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = 230,
241
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = 231,
242
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = 232,
243
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = 233,
244
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = 234,
245
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = 235,
246
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = 236,
247
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = 237,
248
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = 238,
249
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = 239,
250
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = 240,
251
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = 241,
252
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = 242,
253
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = 243,
254
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = 244,
255
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = 245,
256
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = 246,
257
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = 247,
258
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = 248,
259
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = 249,
260
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = 250,
261
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = 251,
262
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = 252,
263
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = 253,
264
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = 254,
265
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = 255,
266
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = 256,
267
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = 257,
268
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = 258,
269
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = 259,
270
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = 260,
271
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = 261,
272
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = 262,
273
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = 263,
274
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = 264,
275
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = 265,
276
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = 266,
277
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = 267,
278
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = 268,
279
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = 269,
280
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = 270,
281
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = 271,
282
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = 272,
283
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = 273,
284
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = 274,
285
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = 275,
286
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = 276,
287
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = 277,
288
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = 278,
289
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = 279,
290
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = 280,
291
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = 281,
292
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = 282,
293
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = 283,
294
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = 284,
295
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = 285,
296
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = 286,
297
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = 287,
298
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = 288,
299
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = 289,
300
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = 290,
301
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = 291,
302
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = 292,
303
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = 293,
304
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = 294,
305
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = 295,
306
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = 296,
307
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = 297,
308
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = 298,
309
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = 299,
310
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = 300,
311
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = 301,
312
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = 302,
313
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = 303,
314
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = 304,
315
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy = 305,
316
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = 306,
317
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = 307,
318
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = 308,
319
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = 309,
320
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = 310,
321
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = 311,
322
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = 312,
323
+ CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = 313,
324
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = 314,
325
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = 315,
326
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = 316,
327
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = 317,
328
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = 318,
329
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = 319,
330
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = 320,
331
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = 321,
332
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = 322,
333
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = 323,
334
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = 324,
335
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = 325,
336
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = 326,
337
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = 327,
338
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = 328,
339
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = 329,
340
+ CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = 330,
341
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = 331,
342
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = 332,
343
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = 333,
344
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = 334,
345
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = 335,
346
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = 336,
347
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = 337,
348
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = 338,
349
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = 339,
350
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = 340,
351
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = 341,
352
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = 342,
353
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = 343,
354
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = 344,
355
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = 345,
356
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = 346,
357
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = 347,
358
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = 348,
359
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = 349,
360
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = 350,
361
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = 351,
362
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = 352,
363
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = 353,
364
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = 354,
365
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = 355,
366
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = 356,
367
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = 357,
368
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = 358,
369
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = 359,
370
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = 360,
371
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = 361,
372
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = 362,
373
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = 363,
374
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = 364,
375
+ CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = 365,
376
+ CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = 366,
377
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = 367,
378
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = 368,
379
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = 369,
380
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = 370,
381
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = 371,
382
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = 372,
383
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = 373,
384
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = 374,
385
+ CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = 375,
386
+ CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = 376,
387
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = 377,
388
+ CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = 378,
389
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = 379,
390
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = 380,
391
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = 381,
392
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = 382,
393
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = 383,
394
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = 384,
395
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = 385,
396
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = 386,
397
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = 387,
398
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = 388,
399
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = 389,
400
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = 390,
401
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = 391,
402
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = 392,
403
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = 393,
404
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = 394,
405
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = 395,
406
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = 396,
407
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = 397,
408
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = 398,
409
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = 399,
410
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = 400,
411
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = 401,
412
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = 402,
413
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = 403,
414
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = 404,
415
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = 405,
416
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = 406,
417
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = 407,
418
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = 408,
419
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = 409,
420
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = 410,
421
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = 411,
422
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = 412,
423
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = 413,
424
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = 414,
425
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = 415,
426
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = 416,
427
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = 417,
428
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = 418,
429
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = 419,
430
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = 420,
431
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = 421,
432
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = 422,
433
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = 423,
434
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = 424,
435
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = 425,
436
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = 426,
437
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = 427,
438
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = 428,
439
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = 429,
440
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = 430,
441
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async_ptsz = 431,
442
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async_ptsz = 432,
443
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async_ptsz = 433,
444
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority_ptsz = 434,
445
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags_ptsz = 435,
446
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent_ptsz = 436,
447
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback_ptsz = 437,
448
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync_ptsz = 438,
449
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery_ptsz = 439,
450
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize_ptsz = 440,
451
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord_ptsz = 441,
452
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel_ptsz = 442,
453
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources_ptsz = 443,
454
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources_ptsz = 444,
455
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2_ptsz = 445,
456
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerConnect = 446,
457
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerDisconnect = 447,
458
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerPresentFrame = 448,
459
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedEglFrame = 449,
460
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttributes = 450,
461
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = 451,
462
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSizeWithFlags = 452,
463
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerReturnFrame = 453,
464
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetP2PAttribute = 454,
465
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetBorderColor = 455,
466
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetBorderColor = 456,
467
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise = 457,
468
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32 = 458,
469
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_ptsz = 459,
470
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32 = 460,
471
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_ptsz = 461,
472
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp = 462,
473
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_ptsz = 463,
474
+ CUPTI_DRIVER_TRACE_CBID_cuNVNbufferGetPointer = 464,
475
+ CUPTI_DRIVER_TRACE_CBID_cuNVNtextureGetArray = 465,
476
+ CUPTI_DRIVER_TRACE_CBID_cuNNSetAllocator = 466,
477
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync = 467,
478
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_ptsz = 468,
479
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromNVNSync = 469,
480
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnectWithFlags = 470,
481
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttribute = 471,
482
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttributes = 472,
483
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64 = 473,
484
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_ptsz = 474,
485
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64 = 475,
486
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_ptsz = 476,
487
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel = 477,
488
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel_ptsz = 478,
489
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromEGLSync = 479,
490
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernelMultiDevice = 480,
491
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetAttribute = 481,
492
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid = 482,
493
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx = 483,
494
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_ptsz = 484,
495
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalMemory = 485,
496
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedBuffer = 486,
497
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedMipmappedArray = 487,
498
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalMemory = 488,
499
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalSemaphore = 489,
500
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync = 490,
501
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync_ptsz = 491,
502
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync = 492,
503
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync_ptsz = 493,
504
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalSemaphore = 494,
505
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture = 495,
506
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_ptsz = 496,
507
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture = 497,
508
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture_ptsz = 498,
509
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing = 499,
510
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing_ptsz = 500,
511
+ CUPTI_DRIVER_TRACE_CBID_cuGraphCreate = 501,
512
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode = 502,
513
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams = 503,
514
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemcpyNode = 504,
515
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeGetParams = 505,
516
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemsetNode = 506,
517
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeGetParams = 507,
518
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeSetParams = 508,
519
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetType = 509,
520
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetRootNodes = 510,
521
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies = 511,
522
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes = 512,
523
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate = 513,
524
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch = 514,
525
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch_ptsz = 515,
526
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecDestroy = 516,
527
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroy = 517,
528
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies = 518,
529
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies = 519,
530
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeSetParams = 520,
531
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams = 521,
532
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroyNode = 522,
533
+ CUPTI_DRIVER_TRACE_CBID_cuGraphClone = 523,
534
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeFindInClone = 524,
535
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddChildGraphNode = 525,
536
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEmptyNode = 526,
537
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc = 527,
538
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc_ptsz = 528,
539
+ CUPTI_DRIVER_TRACE_CBID_cuGraphChildGraphNodeGetGraph = 529,
540
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddHostNode = 530,
541
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeGetParams = 531,
542
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetLuid = 532,
543
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeSetParams = 533,
544
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetNodes = 534,
545
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges = 535,
546
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo = 536,
547
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_ptsz = 537,
548
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams = 538,
549
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2 = 539,
550
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2_ptsz = 540,
551
+ CUPTI_DRIVER_TRACE_CBID_cuThreadExchangeStreamCaptureMode = 541,
552
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetNvSciSyncAttributes = 542,
553
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyAvailableDynamicSMemPerBlock = 543,
554
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease_v2 = 544,
555
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset_v2 = 545,
556
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags_v2 = 546,
557
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressReserve = 547,
558
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressFree = 548,
559
+ CUPTI_DRIVER_TRACE_CBID_cuMemCreate = 549,
560
+ CUPTI_DRIVER_TRACE_CBID_cuMemRelease = 550,
561
+ CUPTI_DRIVER_TRACE_CBID_cuMemMap = 551,
562
+ CUPTI_DRIVER_TRACE_CBID_cuMemUnmap = 552,
563
+ CUPTI_DRIVER_TRACE_CBID_cuMemSetAccess = 553,
564
+ CUPTI_DRIVER_TRACE_CBID_cuMemExportToShareableHandle = 554,
565
+ CUPTI_DRIVER_TRACE_CBID_cuMemImportFromShareableHandle = 555,
566
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationGranularity = 556,
567
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationPropertiesFromHandle = 557,
568
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAccess = 558,
569
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags = 559,
570
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags_ptsz = 560,
571
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate = 561,
572
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemcpyNodeSetParams = 562,
573
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemsetNodeSetParams = 563,
574
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecHostNodeSetParams = 564,
575
+ CUPTI_DRIVER_TRACE_CBID_cuMemRetainAllocationHandle = 565,
576
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetModule = 566,
577
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle_v2 = 567,
578
+ CUPTI_DRIVER_TRACE_CBID_cuCtxResetPersistingL2Cache = 568,
579
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeCopyAttributes = 569,
580
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetAttribute = 570,
581
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetAttribute = 571,
582
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes = 572,
583
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes_ptsz = 573,
584
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute = 574,
585
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute_ptsz = 575,
586
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute = 576,
587
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute_ptsz = 577,
588
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate_v2 = 578,
589
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetTexture1DLinearMaxWidth = 579,
590
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload = 580,
591
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload_ptsz = 581,
592
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetSparseProperties = 582,
593
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetSparseProperties = 583,
594
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync = 584,
595
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync_ptsz = 585,
596
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecChildGraphNodeSetParams = 586,
597
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags = 587,
598
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags_ptsz = 588,
599
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventRecordNode = 589,
600
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventWaitNode = 590,
601
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeGetEvent = 591,
602
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeGetEvent = 592,
603
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeSetEvent = 593,
604
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeSetEvent = 594,
605
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventRecordNodeSetEvent = 595,
606
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventWaitNodeSetEvent = 596,
607
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetPlane = 597,
608
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync = 598,
609
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync_ptsz = 599,
610
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync = 600,
611
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync_ptsz = 601,
612
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolTrimTo = 602,
613
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAttribute = 603,
614
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAttribute = 604,
615
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAccess = 605,
616
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDefaultMemPool = 606,
617
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolCreate = 607,
618
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolDestroy = 608,
619
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetMemPool = 609,
620
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetMemPool = 610,
621
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync = 611,
622
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync_ptsz = 612,
623
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportToShareableHandle = 613,
624
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportFromShareableHandle = 614,
625
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportPointer = 615,
626
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportPointer = 616,
627
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAccess = 617,
628
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresSignalNode = 618,
629
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeGetParams = 619,
630
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeSetParams = 620,
631
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresWaitNode = 621,
632
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeGetParams = 622,
633
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeSetParams = 623,
634
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresSignalNodeSetParams = 624,
635
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresWaitNodeSetParams = 625,
636
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress = 626,
637
+ CUPTI_DRIVER_TRACE_CBID_cuFlushGPUDirectRDMAWrites = 627,
638
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDebugDotPrint = 628,
639
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2 = 629,
640
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2_ptsz = 630,
641
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies = 631,
642
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_ptsz = 632,
643
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectCreate = 633,
644
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRetain = 634,
645
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRelease = 635,
646
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRetainUserObject = 636,
647
+ CUPTI_DRIVER_TRACE_CBID_cuGraphReleaseUserObject = 637,
648
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemAllocNode = 638,
649
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemFreeNode = 639,
650
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGraphMemTrim = 640,
651
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetGraphMemAttribute = 641,
652
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetGraphMemAttribute = 642,
653
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithFlags = 643,
654
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetExecAffinitySupport = 644,
655
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v3 = 645,
656
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetExecAffinity = 646,
657
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid_v2 = 647,
658
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemAllocNodeGetParams = 648,
659
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemFreeNodeGetParams = 649,
660
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetEnabled = 650,
661
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetEnabled = 651,
662
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx = 652,
663
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx_ptsz = 653,
664
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetMemoryRequirements = 654,
665
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetMemoryRequirements = 655,
666
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams = 656,
667
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams_ptsz = 657,
668
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecGetFlags = 658,
669
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2 = 659,
670
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2_ptsz = 660,
671
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2 = 661,
672
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2_ptsz = 662,
673
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2 = 663,
674
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2_ptsz = 664,
675
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2 = 665,
676
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2_ptsz = 666,
677
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = 667,
678
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = 668,
679
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = 669,
680
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = 670,
681
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = 671,
682
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = 672,
683
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = 673,
684
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = 674,
685
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = 675,
686
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = 676,
687
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = 677,
688
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = 678,
689
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = 679,
690
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = 680,
691
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = 681,
692
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = 682,
693
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = 683,
694
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = 684,
695
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = 685,
696
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = 686,
697
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = 687,
698
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = 688,
699
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = 689,
700
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = 690,
701
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = 691,
702
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = 692,
703
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = 693,
704
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = 694,
705
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = 695,
706
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = 696,
707
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = 697,
708
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = 698,
709
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = 699,
710
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = 700,
711
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = 701,
712
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = 702,
713
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = 703,
714
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = 704,
715
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = 705,
716
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = 706,
717
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = 707,
718
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = 708,
719
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = 709,
720
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = 710,
721
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = 711,
722
+ CUPTI_DRIVER_TRACE_CBID_SIZE = 712,
723
+ CUPTI_DRIVER_TRACE_CBID_FORCE_INT = 0x7fffffff
724
+ } CUpti_driver_api_trace_cbid;
725
+
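These IDs are not used on their own; they identify driver-API call sites to the CUPTI callback API. The sketch below assumes the callback interface reachable through the umbrella cupti.h header (cuptiSubscribe, cuptiEnableCallback, CUpti_CallbackData) and uses cuLaunchKernel's cbid from the enum above; the handler body and function names are illustrative.

#include <cupti.h>
#include <stdio.h>

/* Illustrative handler: prints when cuLaunchKernel enters or exits. */
static void CUPTIAPI on_driver_api(void *userdata, CUpti_CallbackDomain domain,
                                   CUpti_CallbackId cbid,
                                   const CUpti_CallbackData *info)
{
    (void)userdata;
    if (domain == CUPTI_CB_DOMAIN_DRIVER_API &&
        cbid == CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel) {
        printf("%s: %s\n", info->functionName,
               info->callbackSite == CUPTI_API_ENTER ? "enter" : "exit");
    }
}

/* Sketch: subscribe once, then enable a single driver-API cbid. */
static CUptiResult trace_kernel_launches(void)
{
    CUpti_SubscriberHandle subscriber;
    CUptiResult res = cuptiSubscribe(&subscriber,
                                     (CUpti_CallbackFunc)on_driver_api, NULL);
    if (res != CUPTI_SUCCESS)
        return res;
    return cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_DRIVER_API,
                               CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
}

Enabling individual cbids, rather than a whole domain, keeps the callback overhead limited to the call sites of interest.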
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h ADDED
@@ -0,0 +1,1371 @@
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_EVENTS_H_)
51
+ #define _CUPTI_EVENTS_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_EVENT_API CUPTI Event API
76
+ * Functions, types, and enums that implement the CUPTI Event API.
77
+ *
78
+ * \note CUPTI event APIs from the header cupti_events.h are not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These APIs will be deprecated in a future CUDA release. They are replaced by the
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for an event.
91
+ *
92
+ * An event represents a countable activity, action, or occurrence on
93
+ * the device.
94
+ */
95
+ typedef uint32_t CUpti_EventID;
96
+
97
+ /**
98
+ * \brief ID for an event domain.
99
+ *
100
+ * ID for an event domain. An event domain represents a group of
101
+ * related events. A device may have multiple instances of a domain,
102
+ * indicating that the device can simultaneously record multiple
103
+ * instances of each event within that domain.
104
+ */
105
+ typedef uint32_t CUpti_EventDomainID;
106
+
107
+ /**
108
+ * \brief A group of events.
109
+ *
110
+ * An event group is a collection of events that are managed
111
+ * together. All events in an event group must belong to the same
112
+ * domain.
113
+ */
114
+ typedef void *CUpti_EventGroup;
115
+
116
+ /**
117
+ * \brief Device class.
118
+ *
119
+ * Enumeration of device classes for device attribute
120
+ * CUPTI_DEVICE_ATTR_DEVICE_CLASS.
121
+ */
122
+ typedef enum {
123
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA = 0,
124
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO = 1,
125
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE = 2,
126
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA = 3,
127
+ } CUpti_DeviceAttributeDeviceClass;
128
+
129
+ /**
130
+ * \brief Device attributes.
131
+ *
132
+ * CUPTI device attributes. These attributes can be read using \ref
133
+ * cuptiDeviceGetAttribute.
134
+ */
135
+ typedef enum {
136
+ /**
137
+ * Number of event IDs for a device. Value is a uint32_t.
138
+ */
139
+ CUPTI_DEVICE_ATTR_MAX_EVENT_ID = 1,
140
+ /**
141
+ * Number of event domain IDs for a device. Value is a uint32_t.
142
+ */
143
+ CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID = 2,
144
+ /**
145
+ * Get global memory bandwidth in Kbytes/sec. Value is a uint64_t.
146
+ */
147
+ CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH = 3,
148
+ /**
149
+ * Get theoretical maximum number of instructions per cycle. Value
150
+ * is a uint32_t.
151
+ */
152
+ CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE = 4,
153
+ /**
154
+ * Get theoretical maximum number of single precision instructions
155
+ * that can be executed per second. Value is a uint64_t.
156
+ */
157
+ CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION = 5,
158
+ /**
159
+ * Get number of frame buffers for device. Value is a uint64_t.
160
+ */
161
+ CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS = 6,
162
+ /**
163
+ * Get PCIE link rate in Mega bits/sec for device. Return 0 if bus-type
164
+ * is non-PCIE. Value is a uint64_t.
165
+ */
166
+ CUPTI_DEVICE_ATTR_PCIE_LINK_RATE = 7,
167
+ /**
168
+ * Get PCIE link width for device. Return 0 if bus-type
169
+ * is non-PCIE. Value is a uint64_t.
170
+ */
171
+ CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH = 8,
172
+ /**
173
+ * Get PCIE generation for device. Return 0 if bus-type
174
+ * is non-PCIE. Value is a uint64_t.
175
+ */
176
+ CUPTI_DEVICE_ATTR_PCIE_GEN = 9,
177
+ /**
178
+ * Get the class for the device. Value is a
179
+ * CUpti_DeviceAttributeDeviceClass.
180
+ */
181
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS = 10,
182
+ /**
183
+ * Get the peak single precision flop per cycle. Value is a uint64_t.
184
+ */
185
+ CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE = 11,
186
+ /**
187
+ * Get the peak double precision flop per cycle. Value is a uint64_t.
188
+ */
189
+ CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE = 12,
190
+ /**
191
+ * Get number of L2 units. Value is a uint64_t.
192
+ */
193
+ CUPTI_DEVICE_ATTR_MAX_L2_UNITS = 13,
194
+ /**
195
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_SHARED
196
+ * preference. Value is a uint64_t.
197
+ */
198
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED = 14,
199
+ /**
200
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_L1
201
+ * preference. Value is a uint64_t.
202
+ */
203
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1 = 15,
204
+ /**
205
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_EQUAL
206
+ * preference. Value is a uint64_t.
207
+ */
208
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL = 16,
209
+ /**
210
+ * Get the peak half precision flop per cycle. Value is a uint64_t.
211
+ */
212
+ CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE = 17,
213
+ /**
214
+ * Check if NVLink is connected to the device. Returns 1 if at least one
215
+ * NVLink is connected to the device, and 0 otherwise.
216
+ * Value is a uint32_t.
217
+ */
218
+ CUPTI_DEVICE_ATTR_NVLINK_PRESENT = 18,
219
+ /**
220
+ * Check if NVLink is present between the GPU and the CPU. Returns the bandwidth,
221
+ * in bytes/sec, if NVLink is present, and 0 otherwise.
222
+ * Value is a uint64_t.
223
+ */
224
+ CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW = 19,
225
+ /**
226
+ * Check if NVSwitch is present in the underlying topology.
227
+ * Returns 1, if present, returns 0 otherwise.
228
+ * Value is a uint32_t.
229
+ */
230
+ CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT = 20,
231
+ CUPTI_DEVICE_ATTR_FORCE_INT = 0x7fffffff,
232
+ } CUpti_DeviceAttribute;
233
+
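As the comment above notes, these attributes are read with cuptiDeviceGetAttribute. A brief sketch of the in/out valueSize convention, assuming cuInit and cuDeviceGet have been called elsewhere; the two attributes queried are arbitrary examples from the enum above.

#include <cuda.h>
#include <cupti_events.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: query the device class and NVLink presence for a device handle. */
static void print_device_attrs(CUdevice dev)
{
    CUpti_DeviceAttributeDeviceClass devClass;
    size_t size = sizeof(devClass);
    if (cuptiDeviceGetAttribute(dev, CUPTI_DEVICE_ATTR_DEVICE_CLASS,
                                &size, &devClass) == CUPTI_SUCCESS)
        printf("device class: %d\n", (int)devClass);

    uint32_t nvlinkPresent = 0;
    size = sizeof(nvlinkPresent);
    if (cuptiDeviceGetAttribute(dev, CUPTI_DEVICE_ATTR_NVLINK_PRESENT,
                                &size, &nvlinkPresent) == CUPTI_SUCCESS)
        printf("NVLink present: %u\n", (unsigned)nvlinkPresent);
}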
234
+ /**
235
+ * \brief Event domain attributes.
236
+ *
237
+ * Event domain attributes. Except where noted, all the attributes can
238
+ * be read using either \ref cuptiDeviceGetEventDomainAttribute or
239
+ * \ref cuptiEventDomainGetAttribute.
240
+ */
241
+ typedef enum {
242
+ /**
243
+ * Event domain name. Value is a null terminated const c-string.
244
+ */
245
+ CUPTI_EVENT_DOMAIN_ATTR_NAME = 0,
246
+ /**
247
+ * Number of instances of the domain for which event counts will be
248
+ * collected. The domain may have additional instances that cannot
249
+ * be profiled (see CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT).
250
+ * Can be read only with \ref
251
+ * cuptiDeviceGetEventDomainAttribute. Value is a uint32_t.
252
+ */
253
+ CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT = 1,
254
+ /**
255
+ * Total number of instances of the domain, including instances that
256
+ * cannot be profiled. Use CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT
257
+ * to get the number of instances that can be profiled. Can be read
258
+ * only with \ref cuptiDeviceGetEventDomainAttribute. Value is a
259
+ * uint32_t.
260
+ */
261
+ CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT = 3,
262
+ /**
263
+ * Collection method used for events contained in the event domain.
264
+ * Value is a \ref CUpti_EventCollectionMethod.
265
+ */
266
+ CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD = 4,
267
+
268
+ CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT = 0x7fffffff,
269
+ } CUpti_EventDomainAttribute;
270
+
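A short sketch of reading one of these attributes with cuptiDeviceGetEventDomainAttribute, one of the two query paths named in the comment above; the domain ID is assumed to come from CUPTI's domain enumeration elsewhere in this header.

#include <cuda.h>
#include <cupti_events.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: report how many instances of `domain` can be profiled on `dev`.
 * `domain` is assumed to have been obtained from domain enumeration. */
static void print_instance_count(CUdevice dev, CUpti_EventDomainID domain)
{
    uint32_t count = 0;
    size_t size = sizeof(count);
    if (cuptiDeviceGetEventDomainAttribute(dev, domain,
                                           CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT,
                                           &size, &count) == CUPTI_SUCCESS)
        printf("profilable instances: %u\n", (unsigned)count);
}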
271
+ /**
272
+ * \brief The collection method used for an event.
273
+ *
274
+ * The collection method indicates how an event is collected.
275
+ */
276
+ typedef enum {
277
+ /**
278
+ * Event is collected using a hardware global performance monitor.
279
+ */
280
+ CUPTI_EVENT_COLLECTION_METHOD_PM = 0,
281
+ /**
282
+ * Event is collected using a hardware SM performance monitor.
283
+ */
284
+ CUPTI_EVENT_COLLECTION_METHOD_SM = 1,
285
+ /**
286
+ * Event is collected using software instrumentation.
287
+ */
288
+ CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED = 2,
289
+ /**
290
+ * Event is collected using NvLink throughput counter method.
291
+ */
292
+ CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC = 3,
293
+ CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT = 0x7fffffff
294
+ } CUpti_EventCollectionMethod;
295
+
296
+ /**
297
+ * \brief Event group attributes.
298
+ *
299
+ * Event group attributes. These attributes can be read using \ref
300
+ * cuptiEventGroupGetAttribute. Attributes marked [rw] can also be
301
+ * written using \ref cuptiEventGroupSetAttribute.
302
+ */
303
+ typedef enum {
304
+ /**
305
+ * The domain to which the event group is bound. This attribute is
306
+ * set when the first event is added to the group. Value is a
307
+ * CUpti_EventDomainID.
308
+ */
309
+ CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID = 0,
310
+ /**
311
+ * [rw] Profile all the instances of the domain for this
312
+ * eventgroup. This feature can be used to get load balancing
313
+ * across all instances of a domain. Value is an integer.
314
+ */
315
+ CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES = 1,
316
+ /**
317
+ * [rw] Reserved for user data.
318
+ */
319
+ CUPTI_EVENT_GROUP_ATTR_USER_DATA = 2,
320
+ /**
321
+ * Number of events in the group. Value is a uint32_t.
322
+ */
323
+ CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS = 3,
324
+ /**
325
+ * Enumerates events in the group. Value is a pointer to buffer of
326
+ * size sizeof(CUpti_EventID) * num_of_events in the eventgroup.
327
+ * num_of_events can be queried using
328
+ * CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS.
329
+ */
330
+ CUPTI_EVENT_GROUP_ATTR_EVENTS = 4,
331
+ /**
332
+ * Number of instances of the domain bound to this event group that
333
+ * will be counted. Value is a uint32_t.
334
+ */
335
+ CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT = 5,
336
+ /**
337
+ * The profiling scope of the event group. The scope can be set to
338
+ * CUPTI_EVENT_PROFILING_SCOPE_DEVICE or CUPTI_EVENT_PROFILING_SCOPE_CONTEXT
339
+ * for an event group before any event is added to it.
340
+ * Setting the scope to CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
341
+ * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT selects how events whose own scope
342
+ * is CUPTI_EVENT_PROFILING_SCOPE_BOTH will be collected.
343
+ * If the profiling scope of an event is already
344
+ * CUPTI_EVENT_PROFILING_SCOPE_DEVICE or CUPTI_EVENT_PROFILING_SCOPE_CONTEXT,
345
+ * then setting this attribute does not affect that default scope.
346
+ * Events of different scopes cannot be added to the same event group.
347
+ * Value is a uint32_t.
348
+ */
349
+ CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE = 6,
350
+ CUPTI_EVENT_GROUP_ATTR_FORCE_INT = 0x7fffffff,
351
+ } CUpti_EventGroupAttribute;
352
+
353
+ /**
354
+ * \brief Profiling scope for event.
355
+ *
356
+ * The profiling scope of an event indicates whether the event can be collected
357
+ * at context scope, at device scope, or at both (i.e. at either context or
358
+ * device scope).
359
+ */
360
+ typedef enum {
361
+ /**
362
+ * Event is collected at context scope.
363
+ */
364
+ CUPTI_EVENT_PROFILING_SCOPE_CONTEXT = 0,
365
+ /**
366
+ * Event is collected at device scope.
367
+ */
368
+ CUPTI_EVENT_PROFILING_SCOPE_DEVICE = 1,
369
+ /**
370
+ * Event can be collected at device or context scope.
371
+ * The scope can be set using \ref cuptiEventGroupSetAttribute API.
372
+ */
373
+ CUPTI_EVENT_PROFILING_SCOPE_BOTH = 2,
374
+ CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT = 0x7fffffff
375
+ } CUpti_EventProfilingScope;
376
+
377
+ /**
378
+ * \brief Event attributes.
379
+ *
380
+ * Event attributes. These attributes can be read using \ref
381
+ * cuptiEventGetAttribute.
382
+ */
383
+ typedef enum {
384
+ /**
385
+ * Event name. Value is a null terminated const c-string.
386
+ */
387
+ CUPTI_EVENT_ATTR_NAME = 0,
388
+ /**
389
+ * Short description of event. Value is a null terminated const
390
+ * c-string.
391
+ */
392
+ CUPTI_EVENT_ATTR_SHORT_DESCRIPTION = 1,
393
+ /**
394
+ * Long description of event. Value is a null terminated const
395
+ * c-string.
396
+ */
397
+ CUPTI_EVENT_ATTR_LONG_DESCRIPTION = 2,
398
+ /**
399
+ * Category of event. Value is CUpti_EventCategory.
400
+ */
401
+ CUPTI_EVENT_ATTR_CATEGORY = 3,
402
+ /**
403
+ * Profiling scope of the events. It can be either device or context or both.
404
+ * Value is a \ref CUpti_EventProfilingScope.
405
+ */
406
+ CUPTI_EVENT_ATTR_PROFILING_SCOPE = 5,
407
+
408
+ CUPTI_EVENT_ATTR_FORCE_INT = 0x7fffffff,
409
+ } CUpti_EventAttribute;
410
+
411
+ /**
412
+ * \brief Event collection modes.
413
+ *
414
+ * The event collection mode determines the period over which the
415
+ * events within the enabled event groups will be collected.
416
+ */
417
+ typedef enum {
418
+ /**
419
+ * Events are collected for the entire duration between the
420
+ * cuptiEventGroupEnable and cuptiEventGroupDisable calls.
421
+ * Event values are reset when the events are read.
422
+ * For CUDA toolkit v6.0 and older this was the default mode.
423
+ */
424
+ CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS = 0,
425
+ /**
426
+ * Events are collected only for the durations of kernel executions
427
+ * that occur between the cuptiEventGroupEnable and
428
+ * cuptiEventGroupDisable calls. Event collection begins when a
429
+ * kernel execution begins, and stops when kernel execution
430
+ * completes. Event values are reset to zero when each kernel
431
+ * execution begins. If multiple kernel executions occur between the
432
+ * cuptiEventGroupEnable and cuptiEventGroupDisable calls then the
433
+ * event values must be read after each kernel launch if those
434
+ * events need to be associated with the specific kernel launch.
435
+ * Note that collection in this mode may significantly change the
436
+ * overall performance characteristics of the application because
437
+ * kernel executions that occur between the cuptiEventGroupEnable and
438
+ * cuptiEventGroupDisable calls are serialized on the GPU.
439
+ * This is the default mode starting with CUDA toolkit v6.5.
440
+ */
441
+ CUPTI_EVENT_COLLECTION_MODE_KERNEL = 1,
442
+ CUPTI_EVENT_COLLECTION_MODE_FORCE_INT = 0x7fffffff
443
+ } CUpti_EventCollectionMode;
444
+
445
+ /**
446
+ * \brief An event category.
447
+ *
448
+ * Each event is assigned to a category that represents the general
449
+ * type of the event. An event's category is accessed using \ref
450
+ * cuptiEventGetAttribute and the CUPTI_EVENT_ATTR_CATEGORY attribute.
451
+ */
452
+ typedef enum {
453
+ /**
454
+ * An instruction related event.
455
+ */
456
+ CUPTI_EVENT_CATEGORY_INSTRUCTION = 0,
457
+ /**
458
+ * A memory related event.
459
+ */
460
+ CUPTI_EVENT_CATEGORY_MEMORY = 1,
461
+ /**
462
+ * A cache related event.
463
+ */
464
+ CUPTI_EVENT_CATEGORY_CACHE = 2,
465
+ /**
466
+ * A profile-trigger event.
467
+ */
468
+ CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3,
469
+ /**
470
+ * A system event.
471
+ */
472
+ CUPTI_EVENT_CATEGORY_SYSTEM = 4,
473
+ CUPTI_EVENT_CATEGORY_FORCE_INT = 0x7fffffff
474
+ } CUpti_EventCategory;
475
+
476
+ /**
477
+ * \brief The overflow value for a CUPTI event.
478
+ *
479
+ * The CUPTI event value that indicates an overflow.
480
+ */
481
+ #define CUPTI_EVENT_OVERFLOW ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
482
+
483
+ /**
484
+ * \brief The value that indicates the event value is invalid
485
+ */
486
+ #define CUPTI_EVENT_INVALID ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
487
+
488
+ /**
489
+ * \brief Flags for cuptiEventGroupReadEvent and
490
+ * cuptiEventGroupReadAllEvents.
491
+ *
492
+ * Flags for \ref cuptiEventGroupReadEvent and \ref
493
+ * cuptiEventGroupReadAllEvents.
494
+ */
495
+ typedef enum {
496
+ /**
497
+ * No flags.
498
+ */
499
+ CUPTI_EVENT_READ_FLAG_NONE = 0,
500
+ CUPTI_EVENT_READ_FLAG_FORCE_INT = 0x7fffffff,
501
+ } CUpti_ReadEventFlags;
502
+
503
+
504
+ /**
505
+ * \brief A set of event groups.
506
+ *
507
+ * A set of event groups. When returned by \ref
508
+ * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
509
+ * a set indicates the event groups that can be enabled at the same
510
+ * time (i.e. all the events in the set can be collected
511
+ * simultaneously).
512
+ */
513
+ typedef struct {
514
+ /**
515
+ * The number of event groups in the set.
516
+ */
517
+ uint32_t numEventGroups;
518
+ /**
519
+ * An array of \p numEventGroups event groups.
520
+ */
521
+ CUpti_EventGroup *eventGroups;
522
+ } CUpti_EventGroupSet;
523
+
524
+ /**
525
+ * \brief A set of event group sets.
526
+ *
527
+ * A set of event group sets. When returned by \ref
528
+ * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
529
+ * a CUpti_EventGroupSets indicates the number of passes required to
530
+ * collect all the events, and the event groups that should be
531
+ * collected during each pass.
532
+ */
533
+ typedef struct {
534
+ /**
535
+ * Number of event group sets.
536
+ */
537
+ uint32_t numSets;
538
+ /**
539
+ * An array of \p numSets event group sets.
540
+ */
541
+ CUpti_EventGroupSet *sets;
542
+ } CUpti_EventGroupSets;
543
+
544
+ /**
545
+ * \brief Set the event collection mode.
546
+ *
547
+ * Set the event collection mode for a \p context. The \p mode
548
+ * controls the event collection behavior of all events in event
549
+ * groups created in the \p context. This API is invalid in kernel
550
+ * replay mode.
551
+ * \note \b Thread-safety: this function is thread safe.
552
+ *
553
+ * \param context The context
554
+ * \param mode The event collection mode
555
+ *
556
+ * \retval CUPTI_SUCCESS
557
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
558
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
559
+ * \retval CUPTI_ERROR_INVALID_OPERATION if called when replay mode is enabled
560
+ * \retval CUPTI_ERROR_NOT_SUPPORTED if mode is not supported on the device
561
+ */
562
+
563
+ CUptiResult CUPTIAPI cuptiSetEventCollectionMode(CUcontext context,
564
+ CUpti_EventCollectionMode mode);
565
+
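A minimal usage sketch for cuptiSetEventCollectionMode (illustrative only; it assumes the application has already created a CUDA context, retrieved here with cuCtxGetCurrent, and omits most error handling):

    #include <cuda.h>
    #include <cupti_events.h>

    static void enableKernelModeCollection(void)
    {
      CUcontext ctx = NULL;
      cuCtxGetCurrent(&ctx);  /* context created elsewhere by the application */

      /* Collect events only while kernels execute; this call is invalid
         while kernel replay mode is enabled on the context. */
      CUptiResult status = cuptiSetEventCollectionMode(ctx, CUPTI_EVENT_COLLECTION_MODE_KERNEL);
      if (status != CUPTI_SUCCESS) {
        /* handle the error */
      }
    }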
566
+ /**
567
+ * \brief Read a device attribute.
568
+ *
569
+ * Read a device attribute and return it in \p *value.
570
+ * \note \b Thread-safety: this function is thread safe.
571
+ *
572
+ * \param device The CUDA device
573
+ * \param attrib The attribute to read
574
+ * \param valueSize Size of the buffer pointed to by \p value, and
575
+ * returns the number of bytes written to \p value
576
+ * \param value Returns the value of the attribute
577
+ *
578
+ * \retval CUPTI_SUCCESS
579
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
580
+ * \retval CUPTI_ERROR_INVALID_DEVICE
581
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
582
+ * is NULL, or if \p attrib is not a device attribute
583
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
584
+ * attribute values, indicates that the \p value buffer is too small
585
+ * to hold the attribute value.
586
+ */
587
+ CUptiResult CUPTIAPI cuptiDeviceGetAttribute(CUdevice device,
588
+ CUpti_DeviceAttribute attrib,
589
+ size_t *valueSize,
590
+ void *value);
591
+
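A sketch of the query pattern for cuptiDeviceGetAttribute (illustrative; "device" is assumed to be a valid CUdevice obtained with cuDeviceGet, CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH is one of the device attributes referenced later in this commit, and the uint64_t value type is an assumption):

    uint64_t bandwidthKBps = 0;
    size_t size = sizeof(bandwidthKBps);
    /* On success, size is updated to the number of bytes written to the value. */
    CUptiResult status = cuptiDeviceGetAttribute(device,
                                                 CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH,
                                                 &size, &bandwidthKBps);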
592
+ /**
593
+ * \brief Read a device timestamp.
594
+ *
595
+ * Returns the device timestamp in \p *timestamp. The timestamp is
596
+ * reported in nanoseconds and indicates the time since the device was
597
+ * last reset.
598
+ * \note \b Thread-safety: this function is thread safe.
599
+ *
600
+ * \param context A context on the device from which to get the timestamp
601
+ * \param timestamp Returns the device timestamp
602
+ *
603
+ * \retval CUPTI_SUCCESS
604
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
605
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
606
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p timestamp is NULL
607
+
608
+ * **DEPRECATED** This API is deprecated as of CUDA 11.3
609
+ */
610
+ CUptiResult CUPTIAPI cuptiDeviceGetTimestamp(CUcontext context,
611
+ uint64_t *timestamp);
612
+
613
+ /**
614
+ * \brief Get the number of domains for a device.
615
+ *
616
+ * Returns the number of domains in \p numDomains for a device.
617
+ * \note \b Thread-safety: this function is thread safe.
618
+ *
619
+ * \param device The CUDA device
620
+ * \param numDomains Returns the number of domains
621
+ *
622
+ * \retval CUPTI_SUCCESS
623
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
624
+ * \retval CUPTI_ERROR_INVALID_DEVICE
625
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
626
+ */
627
+ CUptiResult CUPTIAPI cuptiDeviceGetNumEventDomains(CUdevice device,
628
+ uint32_t *numDomains);
629
+
630
+ /**
631
+ * \brief Get the event domains for a device.
632
+ *
633
+ * Returns the event domains IDs in \p domainArray for a device. The
634
+ * size of the \p domainArray buffer is given by \p
635
+ * *arraySizeBytes. The size of the \p domainArray buffer must be at
636
+ * least \p numdomains * sizeof(CUpti_EventDomainID) or else not all
637
+ * domains will be returned. The value returned in \p
638
+ * *arraySizeBytes contains the number of bytes returned in \p
639
+ * domainArray.
640
+ * \note \b Thread-safety: this function is thread safe.
641
+ *
642
+ * \param device The CUDA device
643
+ * \param arraySizeBytes The size of \p domainArray in bytes, and
644
+ * returns the number of bytes written to \p domainArray
645
+ * \param domainArray Returns the IDs of the event domains for the device
646
+ *
647
+ * \retval CUPTI_SUCCESS
648
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
649
+ * \retval CUPTI_ERROR_INVALID_DEVICE
650
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
651
+ * \p domainArray are NULL
652
+ */
653
+ CUptiResult CUPTIAPI cuptiDeviceEnumEventDomains(CUdevice device,
654
+ size_t *arraySizeBytes,
655
+ CUpti_EventDomainID *domainArray);
656
+
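The usual count-then-enumerate pattern for the two calls above, as a sketch (illustrative; "device" is an already-initialized CUdevice, <stdlib.h> is assumed for malloc/free, and error handling is omitted):

    uint32_t numDomains = 0;
    cuptiDeviceGetNumEventDomains(device, &numDomains);

    size_t bytes = numDomains * sizeof(CUpti_EventDomainID);
    CUpti_EventDomainID *domains = (CUpti_EventDomainID *)malloc(bytes);

    /* bytes is updated to the number of bytes actually written to domains. */
    cuptiDeviceEnumEventDomains(device, &bytes, domains);
    /* ... inspect domains[0 .. bytes/sizeof(CUpti_EventDomainID) - 1] ... */
    free(domains);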
657
+ /**
658
+ * \brief Read an event domain attribute.
659
+ *
660
+ * Returns an event domain attribute in \p *value. The size of the \p
661
+ * value buffer is given by \p *valueSize. The value returned in \p
662
+ * *valueSize contains the number of bytes returned in \p value.
663
+ *
664
+ * If the attribute value is a c-string that is longer than \p
665
+ * *valueSize, then only the first \p *valueSize characters will be
666
+ * returned and there will be no terminating null byte.
667
+ * \note \b Thread-safety: this function is thread safe.
668
+ *
669
+ * \param device The CUDA device
670
+ * \param eventDomain ID of the event domain
671
+ * \param attrib The event domain attribute to read
672
+ * \param valueSize The size of the \p value buffer in bytes, and
673
+ * returns the number of bytes written to \p value
674
+ * \param value Returns the attribute's value
675
+ *
676
+ * \retval CUPTI_SUCCESS
677
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
678
+ * \retval CUPTI_ERROR_INVALID_DEVICE
679
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
680
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
681
+ * is NULL, or if \p attrib is not an event domain attribute
682
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
683
+ * attribute values, indicates that the \p value buffer is too small
684
+ * to hold the attribute value.
685
+ */
686
+ CUptiResult CUPTIAPI cuptiDeviceGetEventDomainAttribute(CUdevice device,
687
+ CUpti_EventDomainID eventDomain,
688
+ CUpti_EventDomainAttribute attrib,
689
+ size_t *valueSize,
690
+ void *value);
691
+
692
+ /**
693
+ * \brief Get the number of event domains available on any device.
694
+ *
695
+ * Returns the total number of event domains available on any
696
+ * CUDA-capable device.
697
+ * \note \b Thread-safety: this function is thread safe.
698
+ *
699
+ * \param numDomains Returns the number of domains
700
+ *
701
+ * \retval CUPTI_SUCCESS
702
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
703
+ */
704
+ CUptiResult CUPTIAPI cuptiGetNumEventDomains(uint32_t *numDomains);
705
+
706
+ /**
707
+ * \brief Get the event domains available on any device.
708
+ *
709
+ * Returns all the event domains available on any CUDA-capable device.
710
+ * Event domain IDs are returned in \p domainArray. The size of the \p
711
+ * domainArray buffer is given by \p *arraySizeBytes. The size of the
712
+ * \p domainArray buffer must be at least \p numDomains *
713
+ * sizeof(CUpti_EventDomainID) or else not all domains will be
714
+ * returned. The value returned in \p *arraySizeBytes contains the
715
+ * number of bytes returned in \p domainArray.
716
+ * \note \b Thread-safety: this function is thread safe.
717
+ *
718
+ * \param arraySizeBytes The size of \p domainArray in bytes, and
719
+ * returns the number of bytes written to \p domainArray
720
+ * \param domainArray Returns all the event domains
721
+ *
722
+ * \retval CUPTI_SUCCESS
723
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
724
+ * \p domainArray are NULL
725
+ */
726
+ CUptiResult CUPTIAPI cuptiEnumEventDomains(size_t *arraySizeBytes,
727
+ CUpti_EventDomainID *domainArray);
728
+
729
+ /**
730
+ * \brief Read an event domain attribute.
731
+ *
732
+ * Returns an event domain attribute in \p *value. The size of the \p
733
+ * value buffer is given by \p *valueSize. The value returned in \p
734
+ * *valueSize contains the number of bytes returned in \p value.
735
+ *
736
+ * If the attribute value is a c-string that is longer than \p
737
+ * *valueSize, then only the first \p *valueSize characters will be
738
+ * returned and there will be no terminating null byte.
739
+ * \note \b Thread-safety: this function is thread safe.
740
+ *
741
+ * \param eventDomain ID of the event domain
742
+ * \param attrib The event domain attribute to read
743
+ * \param valueSize The size of the \p value buffer in bytes, and
744
+ * returns the number of bytes written to \p value
745
+ * \param value Returns the attribute's value
746
+ *
747
+ * \retval CUPTI_SUCCESS
748
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
749
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
750
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
751
+ * is NULL, or if \p attrib is not an event domain attribute
752
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
753
+ * attribute values, indicates that the \p value buffer is too small
754
+ * to hold the attribute value.
755
+ */
756
+ CUptiResult CUPTIAPI cuptiEventDomainGetAttribute(CUpti_EventDomainID eventDomain,
757
+ CUpti_EventDomainAttribute attrib,
758
+ size_t *valueSize,
759
+ void *value);
760
+
761
+ /**
762
+ * \brief Get number of events in a domain.
763
+ *
764
+ * Returns the number of events in \p numEvents for a domain.
765
+ * \note \b Thread-safety: this function is thread safe.
766
+ *
767
+ * \param eventDomain ID of the event domain
768
+ * \param numEvents Returns the number of events in the domain
769
+ *
770
+ * \retval CUPTI_SUCCESS
771
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
772
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
773
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
774
+ */
775
+ CUptiResult CUPTIAPI cuptiEventDomainGetNumEvents(CUpti_EventDomainID eventDomain,
776
+ uint32_t *numEvents);
777
+
778
+ /**
779
+ * \brief Get the events in a domain.
780
+ *
781
+ * Returns the event IDs in \p eventArray for a domain. The size of
782
+ * the \p eventArray buffer is given by \p *arraySizeBytes. The size
783
+ * of the \p eventArray buffer must be at least \p numdomainevents *
784
+ * sizeof(CUpti_EventID) or else not all events will be returned. The
785
+ * value returned in \p *arraySizeBytes contains the number of bytes
786
+ * returned in \p eventArray.
787
+ * \note \b Thread-safety: this function is thread safe.
788
+ *
789
+ * \param eventDomain ID of the event domain
790
+ * \param arraySizeBytes The size of \p eventArray in bytes, and
791
+ * returns the number of bytes written to \p eventArray
792
+ * \param eventArray Returns the IDs of the events in the domain
793
+ *
794
+ * \retval CUPTI_SUCCESS
795
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
796
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
797
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or \p
798
+ * eventArray are NULL
799
+ */
800
+ CUptiResult CUPTIAPI cuptiEventDomainEnumEvents(CUpti_EventDomainID eventDomain,
801
+ size_t *arraySizeBytes,
802
+ CUpti_EventID *eventArray);
803
+
804
+ /**
805
+ * \brief Get an event attribute.
806
+ *
807
+ * Returns an event attribute in \p *value. The size of the \p
808
+ * value buffer is given by \p *valueSize. The value returned in \p
809
+ * *valueSize contains the number of bytes returned in \p value.
810
+ *
811
+ * If the attribute value is a c-string that is longer than \p
812
+ * *valueSize, then only the first \p *valueSize characters will be
813
+ * returned and there will be no terminating null byte.
814
+ * \note \b Thread-safety: this function is thread safe.
815
+ *
816
+ * \param event ID of the event
817
+ * \param attrib The event attribute to read
818
+ * \param valueSize The size of the \p value buffer in bytes, and
819
+ * returns the number of bytes written to \p value
820
+ * \param value Returns the attribute's value
821
+ *
822
+ * \retval CUPTI_SUCCESS
823
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
824
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
825
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
826
+ * is NULL, or if \p attrib is not an event attribute
827
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
828
+ * attribute values, indicates that the \p value buffer is too small
829
+ * to hold the attribute value.
830
+ */
831
+ CUptiResult CUPTIAPI cuptiEventGetAttribute(CUpti_EventID event,
832
+ CUpti_EventAttribute attrib,
833
+ size_t *valueSize,
834
+ void *value);
835
+
836
+ /**
837
+ * \brief Find an event by name.
838
+ *
839
+ * Find an event by name and return the event ID in \p *event.
840
+ * \note \b Thread-safety: this function is thread safe.
841
+ *
842
+ * \param device The CUDA device
843
+ * \param eventName The name of the event to find
844
+ * \param event Returns the ID of the found event or undefined if
845
+ * unable to find the event
846
+ *
847
+ * \retval CUPTI_SUCCESS
848
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
849
+ * \retval CUPTI_ERROR_INVALID_DEVICE
850
+ * \retval CUPTI_ERROR_INVALID_EVENT_NAME if unable to find an event
851
+ * with name \p eventName. In this case \p *event is undefined
852
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventName or \p event are NULL
853
+ */
854
+ CUptiResult CUPTIAPI cuptiEventGetIdFromName(CUdevice device,
855
+ const char *eventName,
856
+ CUpti_EventID *event);
857
+
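A sketch of looking up an event ID by name (illustrative; "device" is an already-initialized CUdevice and "inst_executed" is only an example event name that may not exist on every device):

    CUpti_EventID eventId = 0;
    CUptiResult status = cuptiEventGetIdFromName(device, "inst_executed", &eventId);
    if (status == CUPTI_ERROR_INVALID_EVENT_NAME) {
      /* the device does not expose an event with this name; eventId is undefined */
    }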
858
+ /**
859
+ * \brief Create a new event group for a context.
860
+ *
861
+ * Creates a new event group for \p context and returns the new group
862
+ * in \p *eventGroup.
863
+ * \note \p flags are reserved for future use and should be set to zero.
864
+ * \note \b Thread-safety: this function is thread safe.
865
+ *
866
+ * \param context The context for the event group
867
+ * \param eventGroup Returns the new event group
868
+ * \param flags Reserved - must be zero
869
+ *
870
+ * \retval CUPTI_SUCCESS
871
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
872
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
873
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY
874
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
875
+ */
876
+ CUptiResult CUPTIAPI cuptiEventGroupCreate(CUcontext context,
877
+ CUpti_EventGroup *eventGroup,
878
+ uint32_t flags);
879
+
880
+ /**
881
+ * \brief Destroy an event group.
882
+ *
883
+ * Destroy an \p eventGroup and free its resources. An event group
884
+ * cannot be destroyed if it is enabled.
885
+ * \note \b Thread-safety: this function is thread safe.
886
+ *
887
+ * \param eventGroup The event group to destroy
888
+ *
889
+ * \retval CUPTI_SUCCESS
890
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
891
+ * \retval CUPTI_ERROR_INVALID_OPERATION if the event group is enabled
892
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if eventGroup is NULL
893
+ */
894
+ CUptiResult CUPTIAPI cuptiEventGroupDestroy(CUpti_EventGroup eventGroup);
895
+
896
+ /**
897
+ * \brief Read an event group attribute.
898
+ *
899
+ * Read an event group attribute and return it in \p *value.
900
+ * \note \b Thread-safety: this function is thread safe but client
901
+ * must guard against simultaneous destruction or modification of \p
902
+ * eventGroup (for example, client must guard against simultaneous
903
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
904
+ * etc.), and must guard against simultaneous destruction of the
905
+ * context in which \p eventGroup was created (for example, client
906
+ * must guard against simultaneous calls to cudaDeviceReset,
907
+ * cuCtxDestroy, etc.).
908
+ *
909
+ * \param eventGroup The event group
910
+ * \param attrib The attribute to read
911
+ * \param valueSize Size of the buffer pointed to by \p value, and
912
+ * returns the number of bytes written to \p value
913
+ * \param value Returns the value of the attribute
914
+ *
915
+ * \retval CUPTI_SUCCESS
916
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
917
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
918
+ * is NULL, or if \p attrib is not an eventgroup attribute
919
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
920
+ * attribute values, indicates that the \p value buffer is too small
921
+ * to hold the attribute value.
922
+ */
923
+ CUptiResult CUPTIAPI cuptiEventGroupGetAttribute(CUpti_EventGroup eventGroup,
924
+ CUpti_EventGroupAttribute attrib,
925
+ size_t *valueSize,
926
+ void *value);
927
+
928
+ /**
929
+ * \brief Write an event group attribute.
930
+ *
931
+ * Write an event group attribute.
932
+ * \note \b Thread-safety: this function is thread safe.
933
+ *
934
+ * \param eventGroup The event group
935
+ * \param attrib The attribute to write
936
+ * \param valueSize The size, in bytes, of the value
937
+ * \param value The attribute value to write
938
+ *
939
+ * \retval CUPTI_SUCCESS
940
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
941
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
942
+ * is NULL, or if \p attrib is not an event group attribute, or if
943
+ * \p attrib is not a writable attribute
944
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT Indicates that
945
+ * the \p value buffer is too small to hold the attribute value.
946
+ */
947
+ CUptiResult CUPTIAPI cuptiEventGroupSetAttribute(CUpti_EventGroup eventGroup,
948
+ CUpti_EventGroupAttribute attrib,
949
+ size_t valueSize,
950
+ void *value);
951
+
952
+ /**
953
+ * \brief Add an event to an event group.
954
+ *
955
+ * Add an event to an event group. Adding the event can fail for a number of reasons:
956
+ * \li The event group is enabled
957
+ * \li The event does not belong to the same event domain as the
958
+ * events that are already in the event group
959
+ * \li Device limitations on the events that can belong to the same group
960
+ * \li The event group is full
961
+ *
962
+ * \note \b Thread-safety: this function is thread safe.
963
+ *
964
+ * \param eventGroup The event group
965
+ * \param event The event to add to the group
966
+ *
967
+ * \retval CUPTI_SUCCESS
968
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
969
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
970
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY
971
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
972
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p event belongs to a
973
+ * different event domain than the events already in \p eventGroup, or
974
+ * if a device limitation prevents \p event from being collected at
975
+ * the same time as the events already in \p eventGroup
976
+ * \retval CUPTI_ERROR_MAX_LIMIT_REACHED if \p eventGroup is full
977
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
978
+ */
979
+ CUptiResult CUPTIAPI cuptiEventGroupAddEvent(CUpti_EventGroup eventGroup,
980
+ CUpti_EventID event);
981
+
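Combined with cuptiEventGroupCreate, cuptiEventGroupEnable and cuptiEventGroupDestroy declared in this header, the typical setup sequence is sketched below (illustrative; "context" and "eventId" are assumed to be valid, and error checking is omitted):

    CUpti_EventGroup group = NULL;
    cuptiEventGroupCreate(context, &group, 0);   /* flags must be zero */
    cuptiEventGroupAddEvent(group, eventId);     /* all events must share one event domain */
    cuptiEventGroupEnable(group);                /* zeroes counters and starts collection */
    /* ... run the workload to be measured, then read the counters ... */
    cuptiEventGroupDisable(group);
    cuptiEventGroupDestroy(group);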
982
+ /**
983
+ * \brief Remove an event from an event group.
984
+ *
985
+ * Remove \p event from the event group. The event cannot be
986
+ * removed if the event group is enabled.
987
+ * \note \b Thread-safety: this function is thread safe.
988
+ *
989
+ * \param eventGroup The event group
990
+ * \param event The event to remove from the group
991
+ *
992
+ * \retval CUPTI_SUCCESS
993
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
994
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
995
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
996
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
997
+ */
998
+ CUptiResult CUPTIAPI cuptiEventGroupRemoveEvent(CUpti_EventGroup eventGroup,
999
+ CUpti_EventID event);
1000
+
1001
+ /**
1002
+ * \brief Remove all events from an event group.
1003
+ *
1004
+ * Remove all events from an event group. Events cannot be removed if
1005
+ * the event group is enabled.
1006
+ * \note \b Thread-safety: this function is thread safe.
1007
+ *
1008
+ * \param eventGroup The event group
1009
+ *
1010
+ * \retval CUPTI_SUCCESS
1011
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1012
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
1013
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1014
+ */
1015
+ CUptiResult CUPTIAPI cuptiEventGroupRemoveAllEvents(CUpti_EventGroup eventGroup);
1016
+
1017
+ /**
1018
+ * \brief Zero all the event counts in an event group.
1019
+ *
1020
+ * Zero all the event counts in an event group.
1021
+ * \note \b Thread-safety: this function is thread safe but client
1022
+ * must guard against simultaneous destruction or modification of \p
1023
+ * eventGroup (for example, client must guard against simultaneous
1024
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1025
+ * etc.), and must guard against simultaneous destruction of the
1026
+ * context in which \p eventGroup was created (for example, client
1027
+ * must guard against simultaneous calls to cudaDeviceReset,
1028
+ * cuCtxDestroy, etc.).
1029
+ *
1030
+ * \param eventGroup The event group
1031
+ *
1032
+ * \retval CUPTI_SUCCESS
1033
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1034
+ * \retval CUPTI_ERROR_HARDWARE
1035
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1036
+ */
1037
+ CUptiResult CUPTIAPI cuptiEventGroupResetAllEvents(CUpti_EventGroup eventGroup);
1038
+
1039
+ /**
1040
+ * \brief Enable an event group.
1041
+ *
1042
+ * Enable an event group. Enabling an event group zeros the value of
1043
+ * all the events in the group and then starts collection of those
1044
+ * events.
1045
+ * \note \b Thread-safety: this function is thread safe.
1046
+ *
1047
+ * \param eventGroup The event group
1048
+ *
1049
+ * \retval CUPTI_SUCCESS
1050
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1051
+ * \retval CUPTI_ERROR_HARDWARE
1052
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1053
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1054
+ * enabled due to other already enabled event groups
1055
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1056
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling
1057
+ * and hardware is busy
1058
+ */
1059
+ CUptiResult CUPTIAPI cuptiEventGroupEnable(CUpti_EventGroup eventGroup);
1060
+
1061
+ /**
1062
+ * \brief Disable an event group.
1063
+ *
1064
+ * Disable an event group. Disabling an event group stops collection
1065
+ * of events contained in the group.
1066
+ * \note \b Thread-safety: this function is thread safe.
1067
+ *
1068
+ * \param eventGroup The event group
1069
+ *
1070
+ * \retval CUPTI_SUCCESS
1071
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1072
+ * \retval CUPTI_ERROR_HARDWARE
1073
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1074
+ */
1075
+ CUptiResult CUPTIAPI cuptiEventGroupDisable(CUpti_EventGroup eventGroup);
1076
+
1077
+ /**
1078
+ * \brief Read the value for an event in an event group.
1079
+ *
1080
+ * Read the value for an event in an event group. The event value is
1081
+ * returned in the \p eventValueBuffer buffer. \p
1082
+ * eventValueBufferSizeBytes indicates the size of the \p
1083
+ * eventValueBuffer buffer. The buffer must be at least sizeof(uint64)
1084
+ * if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set
1085
+ * on the group containing the event. The buffer must be at least
1086
+ * (sizeof(uint64) * number of domain instances) if
1087
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is set on the
1088
+ * group.
1089
+ *
1090
+ * If any instance of an event counter overflows, the value returned
1091
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1092
+ *
1093
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1094
+ *
1095
+ * Reading an event from a disabled event group is not allowed. After
1096
+ * being read, an event's value is reset to zero.
1097
+ * \note \b Thread-safety: this function is thread safe but client
1098
+ * must guard against simultaneous destruction or modification of \p
1099
+ * eventGroup (for example, client must guard against simultaneous
1100
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1101
+ * etc.), and must guard against simultaneous destruction of the
1102
+ * context in which \p eventGroup was created (for example, client
1103
+ * must guard against simultaneous calls to cudaDeviceReset,
1104
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1105
+ * called simultaneously with this function, then returned event
1106
+ * values are undefined.
1107
+ *
1108
+ * \param eventGroup The event group
1109
+ * \param flags Flags controlling the reading mode
1110
+ * \param event The event to read
1111
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer
1112
+ * in bytes, and returns the number of bytes written to \p
1113
+ * eventValueBuffer
1114
+ * \param eventValueBuffer Returns the event value(s)
1115
+ *
1116
+ * \retval CUPTI_SUCCESS
1117
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1118
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1119
+ * \retval CUPTI_ERROR_HARDWARE
1120
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1121
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1122
+ * eventValueBufferSizeBytes or \p eventValueBuffer is NULL
1123
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1124
+ * is not sufficient
1125
+ */
1126
+ CUptiResult CUPTIAPI cuptiEventGroupReadEvent(CUpti_EventGroup eventGroup,
1127
+ CUpti_ReadEventFlags flags,
1128
+ CUpti_EventID event,
1129
+ size_t *eventValueBufferSizeBytes,
1130
+ uint64_t *eventValueBuffer);
1131
+
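A sketch of reading one event from an enabled group (illustrative; it assumes CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set, so a single uint64_t is sufficient):

    uint64_t eventValue = 0;
    size_t valueSize = sizeof(eventValue);
    cuptiEventGroupReadEvent(group, CUPTI_EVENT_READ_FLAG_NONE, eventId,
                             &valueSize, &eventValue);
    if (eventValue == CUPTI_EVENT_OVERFLOW) {
      /* the counter overflowed during collection */
    }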
1132
+ /**
1133
+ * \brief Read the values for all the events in an event group.
1134
+ *
1135
+ * Read the values for all the events in an event group. The event
1136
+ * values are returned in the \p eventValueBuffer buffer. \p
1137
+ * eventValueBufferSizeBytes indicates the size of \p
1138
+ * eventValueBuffer. The buffer must be at least (sizeof(uint64) *
1139
+ * number of events in group) if
1140
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set on
1141
+ * the group containing the events. The buffer must be at least
1142
+ * (sizeof(uint64) * number of domain instances * number of events in
1143
+ * group) if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is
1144
+ * set on the group.
1145
+ *
1146
+ * The data format returned in \p eventValueBuffer is:
1147
+ * - domain instance 0: event0 event1 ... eventN
1148
+ * - domain instance 1: event0 event1 ... eventN
1149
+ * - ...
1150
+ * - domain instance M: event0 event1 ... eventN
1151
+ *
1152
+ * The event order in \p eventValueBuffer is returned in \p
1153
+ * eventIdArray. The size of \p eventIdArray is specified in \p
1154
+ * eventIdArraySizeBytes. The size should be at least
1155
+ * (sizeof(CUpti_EventID) * number of events in group).
1156
+ *
1157
+ * If any instance of any event counter overflows, the value returned
1158
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1159
+ *
1160
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1161
+ *
1162
+ * Reading events from a disabled event group is not allowed. After
1163
+ * being read, an event's value is reset to zero.
1164
+ * \note \b Thread-safety: this function is thread safe but client
1165
+ * must guard against simultaneous destruction or modification of \p
1166
+ * eventGroup (for example, client must guard against simultaneous
1167
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1168
+ * etc.), and must guard against simultaneous destruction of the
1169
+ * context in which \p eventGroup was created (for example, client
1170
+ * must guard against simultaneous calls to cudaDeviceReset,
1171
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1172
+ * called simultaneously with this function, then returned event
1173
+ * values are undefined.
1174
+ *
1175
+ * \param eventGroup The event group
1176
+ * \param flags Flags controlling the reading mode
1177
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer in
1178
+ * bytes, and returns the number of bytes written to \p
1179
+ * eventValueBuffer
1180
+ * \param eventValueBuffer Returns the event values
1181
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
1182
+ * and returns the number of bytes written to \p eventIdArray
1183
+ * \param eventIdArray Returns the IDs of the events in the same order
1184
+ * as the values returned in \p eventValueBuffer.
1185
+ * \param numEventIdsRead Returns the number of event IDs returned
1186
+ * in \p eventIdArray
1187
+ *
1188
+ * \retval CUPTI_SUCCESS
1189
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1190
+ * \retval CUPTI_ERROR_HARDWARE
1191
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1192
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1193
+ * eventValueBufferSizeBytes, \p eventValueBuffer, \p
1194
+ * eventIdArraySizeBytes, \p eventIdArray or \p numEventIdsRead is
1195
+ * NULL
1196
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1197
+ * or \p eventIdArray is not sufficient
1198
+ */
1199
+ CUptiResult CUPTIAPI cuptiEventGroupReadAllEvents(CUpti_EventGroup eventGroup,
1200
+ CUpti_ReadEventFlags flags,
1201
+ size_t *eventValueBufferSizeBytes,
1202
+ uint64_t *eventValueBuffer,
1203
+ size_t *eventIdArraySizeBytes,
1204
+ CUpti_EventID *eventIdArray,
1205
+ size_t *numEventIdsRead);
1206
+
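A sketch of sizing the buffers for cuptiEventGroupReadAllEvents from the group attributes documented above (illustrative; it assumes CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is set on "group", <stdlib.h> is available, and error handling is omitted):

    uint32_t numEvents = 0, numInstances = 0;
    size_t attrSize = sizeof(numEvents);
    cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
                                &attrSize, &numEvents);
    attrSize = sizeof(numInstances);
    cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT,
                                &attrSize, &numInstances);

    size_t valueBytes = sizeof(uint64_t) * numInstances * numEvents;
    size_t idBytes    = sizeof(CUpti_EventID) * numEvents;
    uint64_t      *values = (uint64_t *)malloc(valueBytes);
    CUpti_EventID *ids    = (CUpti_EventID *)malloc(idBytes);
    size_t numIdsRead = 0;

    cuptiEventGroupReadAllEvents(group, CUPTI_EVENT_READ_FLAG_NONE,
                                 &valueBytes, values,
                                 &idBytes, ids, &numIdsRead);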
1207
+ /**
1208
+ * \brief For a set of events, get the grouping that indicates the
1209
+ * number of passes and the event groups necessary to collect the
1210
+ * events.
1211
+ *
1212
+ * The number of events that can be collected simultaneously varies by
1213
+ * device and by the type of the events. When events can be collected
1214
+ * simultaneously, they may need to be grouped into multiple event
1215
+ * groups because they are from different event domains. This function
1216
+ * takes a set of events and determines how many passes are required
1217
+ * to collect all those events, and which events can be collected
1218
+ * simultaneously in each pass.
1219
+ *
1220
+ * The CUpti_EventGroupSets returned in \p eventGroupPasses indicates
1221
+ * how many passes are required to collect the events with the \p
1222
+ * numSets field. Within each event group set, the \p sets array
1223
+ * indicates the event groups that should be collected on each pass.
1224
+ * \note \b Thread-safety: this function is thread safe, but client
1225
+ * must guard against another thread simultaneously destroying \p
1226
+ * context.
1227
+ *
1228
+ * \param context The context for event collection
1229
+ * \param eventIdArraySizeBytes Size of \p eventIdArray in bytes
1230
+ * \param eventIdArray Array of event IDs that need to be grouped
1231
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
1232
+ * indicates the number of passes required to collect the events and
1233
+ * the events to collect on each pass
1234
+ *
1235
+ * \retval CUPTI_SUCCESS
1236
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1237
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
1238
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1239
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArray or
1240
+ * \p eventGroupPasses is NULL
1241
+ */
1242
+ CUptiResult CUPTIAPI cuptiEventGroupSetsCreate(CUcontext context,
1243
+ size_t eventIdArraySizeBytes,
1244
+ CUpti_EventID *eventIdArray,
1245
+ CUpti_EventGroupSets **eventGroupPasses);
1246
+
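A sketch of the multi-pass collection pattern this function enables (illustrative; "context", "eventIds" and "numEvents" are assumed to exist, the workload must be re-run once per pass, and error handling is omitted):

    CUpti_EventGroupSets *passes = NULL;
    cuptiEventGroupSetsCreate(context,
                              numEvents * sizeof(CUpti_EventID), eventIds,
                              &passes);

    for (uint32_t pass = 0; pass < passes->numSets; pass++) {
      CUpti_EventGroupSet *set = &passes->sets[pass];
      cuptiEventGroupSetEnable(set);
      /* ... run the workload for this pass ... */
      /* ... read events from set->eventGroups[0 .. set->numEventGroups - 1] ... */
      cuptiEventGroupSetDisable(set);
    }
    cuptiEventGroupSetsDestroy(passes);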
1247
+ /**
1248
+ * \brief Destroy an event group sets object.
1249
+ *
1250
+ * Destroy a CUpti_EventGroupSets object.
1251
+ * \note \b Thread-safety: this function is thread safe.
1252
+ *
1253
+ * \param eventGroupSets The object to destroy
1254
+ *
1255
+ * \retval CUPTI_SUCCESS
1256
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1257
+ * \retval CUPTI_ERROR_INVALID_OPERATION if any of the event groups
1258
+ * contained in the sets is enabled
1259
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSets is NULL
1260
+ */
1261
+ CUptiResult CUPTIAPI cuptiEventGroupSetsDestroy(CUpti_EventGroupSets *eventGroupSets);
1262
+
1263
+
1264
+ /**
1265
+ * \brief Enable an event group set.
1266
+ *
1267
+ * Enable a set of event groups. Enabling a set of event groups zeros the value of
1268
+ * all the events in all the groups and then starts collection of those events.
1269
+ * \note \b Thread-safety: this function is thread safe.
1270
+ *
1271
+ * \param eventGroupSet The pointer to the event group set
1272
+ *
1273
+ * \retval CUPTI_SUCCESS
1274
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1275
+ * \retval CUPTI_ERROR_HARDWARE
1276
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1277
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1278
+ * enabled due to other already enabled event groups
1279
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1280
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling and hardware is
1281
+ * busy
1282
+ */
1283
+ CUptiResult CUPTIAPI cuptiEventGroupSetEnable(CUpti_EventGroupSet *eventGroupSet);
1284
+
1285
+ /**
1286
+ * \brief Disable an event group set.
1287
+ *
1288
+ * Disable a set of event groups. Disabling a set of event groups
1289
+ * stops collection of events contained in the groups.
1290
+ * \note \b Thread-safety: this function is thread safe.
1291
+ * \note \b If this call fails, some of the event groups in the set may be disabled
1292
+ * and other event groups may remain enabled.
1293
+ *
1294
+ * \param eventGroupSet The pointer to the event group set
1295
+ * \retval CUPTI_SUCCESS
1296
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1297
+ * \retval CUPTI_ERROR_HARDWARE
1298
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1299
+ */
1300
+ CUptiResult CUPTIAPI cuptiEventGroupSetDisable(CUpti_EventGroupSet *eventGroupSet);
1301
+
1302
+ /**
1303
+ * \brief Enable kernel replay mode.
1304
+ *
1305
+ * Set profiling mode for the context to replay mode. In this mode,
1306
+ * any number of events can be collected in one run of the kernel. The
1307
+ * event collection mode will automatically switch to
1308
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. In this mode, \ref
1309
+ * cuptiSetEventCollectionMode will return
1310
+ * CUPTI_ERROR_INVALID_OPERATION.
1311
+ * \note \b Kernels might take longer to run if many events are enabled.
1312
+ * \note \b Thread-safety: this function is thread safe.
1313
+ *
1314
+ * \param context The context
1315
+ * \retval CUPTI_SUCCESS
1316
+ */
1317
+ CUptiResult CUPTIAPI cuptiEnableKernelReplayMode(CUcontext context);
1318
+
1319
+ /**
1320
+ * \brief Disable kernel replay mode.
1321
+ *
1322
+ * Set profiling mode for the context to non-replay (default)
1323
+ * mode. Event collection mode will be set to
1324
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. All previously enabled
1325
+ * event groups and event group sets will be disabled.
1326
+ * \note \b Thread-safety: this function is thread safe.
1327
+ *
1328
+ * \param context The context
1329
+ * \retval CUPTI_SUCCESS
1330
+ */
1331
+ CUptiResult CUPTIAPI cuptiDisableKernelReplayMode(CUcontext context);
1332
+
1333
+ /**
1334
+ * \brief Function type for getting updates on kernel replay.
1335
+ *
1336
+ * \param kernelName The mangled kernel name
1337
+ * \param numReplaysDone Number of replays done so far
1338
+ * \param customData Pointer of any custom data passed in when subscribing
1339
+ */
1340
+ typedef void (CUPTIAPI *CUpti_KernelReplayUpdateFunc)(
1341
+ const char *kernelName,
1342
+ int numReplaysDone,
1343
+ void *customData);
1344
+
1345
+ /**
1346
+ * \brief Subscribe to kernel replay updates.
1347
+ *
1348
+ * When subscribed, the function pointer passed in will be called each time a
1349
+ * kernel run finishes during kernel replay. A previously subscribed function
1350
+ * pointer will be replaced. Passing NULL as the function pointer unsubscribes
1351
+ * from updates.
1352
+ *
1353
+ * \param updateFunc The update function pointer
1354
+ * \param customData Pointer to any custom data
1355
+ * \retval CUPTI_SUCCESS
1356
+ */
1357
+ CUptiResult CUPTIAPI cuptiKernelReplaySubscribeUpdate(CUpti_KernelReplayUpdateFunc updateFunc, void *customData);
1358
+
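A sketch of a replay-update callback matching the CUpti_KernelReplayUpdateFunc type above (illustrative; the printf body is a placeholder, "context" is assumed to be a valid CUcontext, and <stdio.h> is assumed):

    static void CUPTIAPI onReplayUpdate(const char *kernelName,
                                        int numReplaysDone,
                                        void *customData)
    {
      (void)customData;
      printf("kernel %s: %d replays done\n", kernelName, numReplaysDone);
    }

    /* ... */
    cuptiEnableKernelReplayMode(context);
    cuptiKernelReplaySubscribeUpdate(onReplayUpdate, NULL);
    /* ... profiled work ... */
    cuptiKernelReplaySubscribeUpdate(NULL, NULL);   /* unsubscribe */
    cuptiDisableKernelReplayMode(context);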
1359
+ /** @} */ /* END CUPTI_EVENT_API */
1360
+
1361
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
1362
+ #pragma GCC visibility pop
1363
+ #endif
1364
+
1365
+ #if defined(__cplusplus)
1366
+ }
1367
+ #endif
1368
+
1369
+ #endif /*_CUPTI_EVENTS_H_*/
1370
+
1371
+
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h ADDED
@@ -0,0 +1,825 @@
1
+ /*
2
+ * Copyright 2011-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_METRIC_H_)
51
+ #define _CUPTI_METRIC_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_METRIC_API CUPTI Metric API
76
+ * Functions, types, and enums that implement the CUPTI Metric API.
77
+ *
78
+ * \note The CUPTI Metric API from the header cupti_metrics.h is not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These APIs will be deprecated in a future CUDA release. They are replaced by the
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for a metric.
91
+ *
92
+ * A metric provides a measure of some aspect of the device.
93
+ */
94
+ typedef uint32_t CUpti_MetricID;
95
+
96
+ /**
97
+ * \brief A metric category.
98
+ *
99
+ * Each metric is assigned to a category that represents the general
100
+ * type of the metric. A metric's category is accessed using \ref
101
+ * cuptiMetricGetAttribute and the CUPTI_METRIC_ATTR_CATEGORY
102
+ * attribute.
103
+ */
104
+ typedef enum {
105
+ /**
106
+ * A memory related metric.
107
+ */
108
+ CUPTI_METRIC_CATEGORY_MEMORY = 0,
109
+ /**
110
+ * An instruction related metric.
111
+ */
112
+ CUPTI_METRIC_CATEGORY_INSTRUCTION = 1,
113
+ /**
114
+ * A multiprocessor related metric.
115
+ */
116
+ CUPTI_METRIC_CATEGORY_MULTIPROCESSOR = 2,
117
+ /**
118
+ * A cache related metric.
119
+ */
120
+ CUPTI_METRIC_CATEGORY_CACHE = 3,
121
+ /**
122
+ * A texture related metric.
123
+ */
124
+ CUPTI_METRIC_CATEGORY_TEXTURE = 4,
125
+ /**
126
+ * An NVLink related metric.
127
+ */
128
+ CUPTI_METRIC_CATEGORY_NVLINK = 5,
129
+ /**
130
+ * A PCIe related metric.
131
+ */
132
+ CUPTI_METRIC_CATEGORY_PCIE = 6,
133
+ CUPTI_METRIC_CATEGORY_FORCE_INT = 0x7fffffff,
134
+ } CUpti_MetricCategory;
135
+
136
+ /**
137
+ * \brief A metric evaluation mode.
138
+ *
139
+ * A metric can be evaluated per hardware instance to know the load balancing
140
+ * across instances of a domain or the metric can be evaluated in aggregate mode
141
+ * when the events involved in metric evaluation are from different event
142
+ * domains. It might be possible to evaluate some metrics in both
143
+ * modes for convenience. A metric's evaluation mode is accessed using \ref
144
+ * CUpti_MetricEvaluationMode and the CUPTI_METRIC_ATTR_EVALUATION_MODE
145
+ * attribute.
146
+ */
147
+ typedef enum {
148
+ /**
149
+ * If this bit is set, the metric can be profiled for each instance of the
150
+ * domain. The event values passed to \ref cuptiMetricGetValue can contain
151
+ * values for one instance of the domain. And \ref cuptiMetricGetValue can
152
+ * be called for each instance.
153
+ */
154
+ CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE = 1,
155
+ /**
156
+ * If this bit is set, the metric can be profiled over all instances. The
157
+ * event values passed to \ref cuptiMetricGetValue can be aggregated values
158
+ * of events for all instances of the domain.
159
+ */
160
+ CUPTI_METRIC_EVALUATION_MODE_AGGREGATE = 1 << 1,
161
+ CUPTI_METRIC_EVALUATION_MODE_FORCE_INT = 0x7fffffff,
162
+ } CUpti_MetricEvaluationMode;
163
+
164
+ /**
165
+ * \brief Kinds of metric values.
166
+ *
167
+ * Metric values can be one of several different kinds. Corresponding
168
+ * to each kind is a member of the CUpti_MetricValue union. The metric
169
+ * value returned by \ref cuptiMetricGetValue should be accessed using
170
+ * the appropriate member of that union based on its value kind.
171
+ */
172
+ typedef enum {
173
+ /**
174
+ * The metric value is a 64-bit double.
175
+ */
176
+ CUPTI_METRIC_VALUE_KIND_DOUBLE = 0,
177
+ /**
178
+ * The metric value is a 64-bit unsigned integer.
179
+ */
180
+ CUPTI_METRIC_VALUE_KIND_UINT64 = 1,
181
+ /**
182
+ * The metric value is a percentage represented by a 64-bit
183
+ * double. For example, 57.5% is represented by the value 57.5.
184
+ */
185
+ CUPTI_METRIC_VALUE_KIND_PERCENT = 2,
186
+ /**
187
+ * The metric value is a throughput represented by a 64-bit
188
+ * integer. The unit for throughput values is bytes/second.
189
+ */
190
+ CUPTI_METRIC_VALUE_KIND_THROUGHPUT = 3,
191
+ /**
192
+ * The metric value is a 64-bit signed integer.
193
+ */
194
+ CUPTI_METRIC_VALUE_KIND_INT64 = 4,
195
+ /**
196
+ * The metric value is a utilization level, as represented by
197
+ * CUpti_MetricValueUtilizationLevel.
198
+ */
199
+ CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL = 5,
200
+
201
+ CUPTI_METRIC_VALUE_KIND_FORCE_INT = 0x7fffffff
202
+ } CUpti_MetricValueKind;
203
+
204
+ /**
205
+ * \brief Enumeration of utilization levels for metrics values of kind
206
+ * CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL. Utilization values can
207
+ * vary from IDLE (0) to MAX (10) but the enumeration only provides
208
+ * specific names for a few values.
209
+ */
210
+ typedef enum {
211
+ CUPTI_METRIC_VALUE_UTILIZATION_IDLE = 0,
212
+ CUPTI_METRIC_VALUE_UTILIZATION_LOW = 2,
213
+ CUPTI_METRIC_VALUE_UTILIZATION_MID = 5,
214
+ CUPTI_METRIC_VALUE_UTILIZATION_HIGH = 8,
215
+ CUPTI_METRIC_VALUE_UTILIZATION_MAX = 10,
216
+ CUPTI_METRIC_VALUE_UTILIZATION_FORCE_INT = 0x7fffffff
217
+ } CUpti_MetricValueUtilizationLevel;
218
+
219
+ /**
220
+ * \brief Metric attributes.
221
+ *
222
+ * Metric attributes describe properties of a metric. These attributes
223
+ * can be read using \ref cuptiMetricGetAttribute.
224
+ */
225
+ typedef enum {
226
+ /**
227
+ * Metric name. Value is a null terminated const c-string.
228
+ */
229
+ CUPTI_METRIC_ATTR_NAME = 0,
230
+ /**
231
+ * Short description of metric. Value is a null terminated const c-string.
232
+ */
233
+ CUPTI_METRIC_ATTR_SHORT_DESCRIPTION = 1,
234
+ /**
235
+ * Long description of metric. Value is a null terminated const c-string.
236
+ */
237
+ CUPTI_METRIC_ATTR_LONG_DESCRIPTION = 2,
238
+ /**
239
+ * Category of the metric. Value is of type CUpti_MetricCategory.
240
+ */
241
+ CUPTI_METRIC_ATTR_CATEGORY = 3,
242
+ /**
243
+ * Value type of the metric. Value is of type CUpti_MetricValueKind.
244
+ */
245
+ CUPTI_METRIC_ATTR_VALUE_KIND = 4,
246
+ /**
247
+ * Metric evaluation mode. Value is of type CUpti_MetricEvaluationMode.
248
+ */
249
+ CUPTI_METRIC_ATTR_EVALUATION_MODE = 5,
250
+ CUPTI_METRIC_ATTR_FORCE_INT = 0x7fffffff,
251
+ } CUpti_MetricAttribute;
252
+
253
+ /**
254
+ * \brief A metric value.
255
+ *
256
+ * Metric values can be one of several different kinds. Corresponding
257
+ * to each kind is a member of the CUpti_MetricValue union. The metric
258
+ * value returned by \ref cuptiMetricGetValue should be accessed using
259
+ * the appropriate member of that union based on its value kind.
260
+ */
261
+ typedef union {
262
+ /*
263
+ * Value for CUPTI_METRIC_VALUE_KIND_DOUBLE.
264
+ */
265
+ double metricValueDouble;
266
+ /*
267
+ * Value for CUPTI_METRIC_VALUE_KIND_UINT64.
268
+ */
269
+ uint64_t metricValueUint64;
270
+ /*
271
+ * Value for CUPTI_METRIC_VALUE_KIND_INT64.
272
+ */
273
+ int64_t metricValueInt64;
274
+ /*
275
+ * Value for CUPTI_METRIC_VALUE_KIND_PERCENT. For example, 57.5% is
276
+ * represented by the value 57.5.
277
+ */
278
+ double metricValuePercent;
279
+ /*
280
+ * Value for CUPTI_METRIC_VALUE_KIND_THROUGHPUT. The unit for
281
+ * throughput values is bytes/second.
282
+ */
283
+ uint64_t metricValueThroughput;
284
+ /*
285
+ * Value for CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL.
286
+ */
287
+ CUpti_MetricValueUtilizationLevel metricValueUtilizationLevel;
288
+ } CUpti_MetricValue;
289
+
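A sketch of interpreting a metric value according to its kind (illustrative; how the value and its kind are obtained, e.g. via cuptiMetricGetValue and cuptiMetricGetAttribute with CUPTI_METRIC_ATTR_VALUE_KIND, is assumed, as is <stdio.h>):

    static void printMetricValue(CUpti_MetricValueKind kind, CUpti_MetricValue value)
    {
      switch (kind) {
      case CUPTI_METRIC_VALUE_KIND_DOUBLE:
        printf("%f\n", value.metricValueDouble);                                      break;
      case CUPTI_METRIC_VALUE_KIND_UINT64:
        printf("%llu\n", (unsigned long long)value.metricValueUint64);                break;
      case CUPTI_METRIC_VALUE_KIND_INT64:
        printf("%lld\n", (long long)value.metricValueInt64);                          break;
      case CUPTI_METRIC_VALUE_KIND_PERCENT:
        printf("%f%%\n", value.metricValuePercent);                                   break;
      case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
        printf("%llu bytes/sec\n", (unsigned long long)value.metricValueThroughput);  break;
      case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
        printf("utilization level %d\n", (int)value.metricValueUtilizationLevel);     break;
      default:                                                                        break;
      }
    }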
290
+ /**
291
+ * \brief Device class.
292
+ *
293
+ * Enumeration of device classes for metric property
294
+ * CUPTI_METRIC_PROPERTY_DEVICE_CLASS.
295
+ */
296
+ typedef enum {
297
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TESLA = 0,
298
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_QUADRO = 1,
299
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_GEFORCE = 2,
300
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TEGRA = 3,
301
+ } CUpti_MetricPropertyDeviceClass;
302
+
303
+ /**
304
+ * \brief Metric device properties.
305
+ *
306
+ * Metric device properties describe device properties which are needed for a metric.
307
+ * Some of these properties can be collected using cuDeviceGetAttribute.
308
+ */
309
+ typedef enum {
310
+ /*
311
+ * Number of multiprocessors on a device. This can be collected
312
+ * using value of \param CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT of
313
+ * cuDeviceGetAttribute.
314
+ */
315
+ CUPTI_METRIC_PROPERTY_MULTIPROCESSOR_COUNT,
316
+ /*
317
+ * Maximum number of warps on a multiprocessor. This can be
318
+ * collected using ratio of value of \param
319
+ * CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR and \param
320
+ * CU_DEVICE_ATTRIBUTE_WARP_SIZE of cuDeviceGetAttribute.
321
+ */
322
+ CUPTI_METRIC_PROPERTY_WARPS_PER_MULTIPROCESSOR,
323
+ /*
324
+ * GPU Time for kernel in ns. This should be profiled using CUPTI
325
+ * Activity API.
326
+ */
327
+ CUPTI_METRIC_PROPERTY_KERNEL_GPU_TIME,
328
+ /*
329
+ * Clock rate for device in KHz. This should be collected using
330
+ * value of \param CU_DEVICE_ATTRIBUTE_CLOCK_RATE of
331
+ * cuDeviceGetAttribute.
332
+ */
333
+ CUPTI_METRIC_PROPERTY_CLOCK_RATE,
334
+ /*
335
+ * Number of Frame buffer units for device. This should be collected
336
+ * using value of \param CUPTI_DEVICE_ATTRIBUTE_MAX_FRAME_BUFFERS of
337
+ * cuptiDeviceGetAttribute.
338
+ */
339
+ CUPTI_METRIC_PROPERTY_FRAME_BUFFER_COUNT,
340
+ /*
341
+ * Global memory bandwidth in KBytes/sec. This should be collected
342
+ * using value of \param CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH
343
+ * of cuptiDeviceGetAttribute.
344
+ */
345
+ CUPTI_METRIC_PROPERTY_GLOBAL_MEMORY_BANDWIDTH,
346
+ /*
347
+ * PCIE link rate in Mega bits/sec. This should be collected using
348
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_RATE of
349
+ * cuptiDeviceGetAttribute.
350
+ */
351
+ CUPTI_METRIC_PROPERTY_PCIE_LINK_RATE,
352
+ /*
353
+ * PCIE link width for device. This should be collected using
354
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH of
355
+ * cuptiDeviceGetAttribute.
356
+ */
357
+ CUPTI_METRIC_PROPERTY_PCIE_LINK_WIDTH,
358
+ /*
359
+ * PCIE generation for device. This should be collected using
360
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_GEN of
361
+ * cuptiDeviceGetAttribute.
362
+ */
363
+ CUPTI_METRIC_PROPERTY_PCIE_GEN,
364
+ /*
365
+ * The device class. This should be collected using
366
+ * value of \param CUPTI_DEVICE_ATTR_DEVICE_CLASS of
367
+ * cuptiDeviceGetAttribute.
368
+ */
369
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS,
370
+ /*
371
+ * Peak single precision floating point operations that
372
+ * can be performed in one cycle by the device.
373
+ * This should be collected using value of
374
+ * \param CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE of
375
+ * cuptiDeviceGetAttribute.
376
+ */
377
+ CUPTI_METRIC_PROPERTY_FLOP_SP_PER_CYCLE,
378
+ /*
379
+ * Peak double precision floating point operations that
380
+ * can be performed in one cycle by the device.
381
+ * This should be collected using value of
382
+ * \param CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE of
383
+ * cuptiDeviceGetAttribute.
384
+ */
385
+ CUPTI_METRIC_PROPERTY_FLOP_DP_PER_CYCLE,
386
+ /*
387
+ * Number of L2 units on a device. This can be collected
388
+ * using value of \param CUPTI_DEVICE_ATTR_MAX_L2_UNITS of
389
+ * cuDeviceGetAttribute.
390
+ */
391
+ CUPTI_METRIC_PROPERTY_L2_UNITS,
392
+ /*
393
+ * Whether ECC support is enabled on the device. This can be
394
+ * collected using value of \param CU_DEVICE_ATTRIBUTE_ECC_ENABLED of
395
+ * cuDeviceGetAttribute.
396
+ */
397
+ CUPTI_METRIC_PROPERTY_ECC_ENABLED,
398
+ /*
399
+ * Peak half precision floating point operations that
400
+ * can be performed in one cycle by the device.
401
+ * This should be collected using value of
402
+ * \param CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE of
403
+ * cuptiDeviceGetAttribute.
404
+ */
405
+ CUPTI_METRIC_PROPERTY_FLOP_HP_PER_CYCLE,
406
+ /*
407
+ * NVLINK Bandwitdh for device. This should be collected
408
+ * using value of \param CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW of
409
+ * cuptiDeviceGetAttribute.
410
+ */
411
+ CUPTI_METRIC_PROPERTY_GPU_CPU_NVLINK_BANDWIDTH,
412
+ } CUpti_MetricPropertyID;
413
+
414
+ /**
415
+ * \brief Get the total number of metrics available on any device.
416
+ *
417
+ * Returns the total number of metrics available on any CUDA-capable
418
+ * device.
419
+ *
420
+ * \param numMetrics Returns the number of metrics
421
+ *
422
+ * \retval CUPTI_SUCCESS
423
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL
424
+ */
425
+ CUptiResult CUPTIAPI cuptiGetNumMetrics(uint32_t *numMetrics);
426
+
427
+ /**
428
+ * \brief Get all the metrics available on any device.
429
+ *
430
+ * Returns the metric IDs in \p metricArray for all CUDA-capable
431
+ * devices. The size of the \p metricArray buffer is given by \p
432
+ * *arraySizeBytes. The size of the \p metricArray buffer must be at
433
+ * least \p numMetrics * sizeof(CUpti_MetricID) or not all metric IDs will
434
+ * be returned. The value returned in \p *arraySizeBytes contains
435
+ * the number of bytes returned in \p metricArray.
436
+ *
437
+ * \param arraySizeBytes The size of \p metricArray in bytes, and
438
+ * returns the number of bytes written to \p metricArray
439
+ * \param metricArray Returns the IDs of the metrics
440
+ *
441
+ * \retval CUPTI_SUCCESS
442
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
443
+ * \p metricArray are NULL
444
+ */
445
+ CUptiResult CUPTIAPI cuptiEnumMetrics(size_t *arraySizeBytes,
446
+ CUpti_MetricID *metricArray);
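
A minimal usage sketch (not part of the header) pairing cuptiGetNumMetrics with cuptiEnumMetrics and reading each metric's name through cuptiMetricGetAttribute, which is declared further down in this file. The umbrella include <cupti.h> and the simple error handling are assumptions for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include <cupti.h>   /* umbrella header; assumed to pull in the metric API */

static void listAllMetricNames(void)
{
    uint32_t numMetrics = 0;
    if (cuptiGetNumMetrics(&numMetrics) != CUPTI_SUCCESS)
        return;

    size_t arraySizeBytes = numMetrics * sizeof(CUpti_MetricID);
    CUpti_MetricID *metricArray = (CUpti_MetricID *)malloc(arraySizeBytes);

    /* On return, arraySizeBytes holds the number of bytes actually written. */
    if (cuptiEnumMetrics(&arraySizeBytes, metricArray) == CUPTI_SUCCESS) {
        size_t count = arraySizeBytes / sizeof(CUpti_MetricID);
        for (size_t i = 0; i < count; ++i) {
            char name[256];
            size_t nameSize = sizeof(name);
            if (cuptiMetricGetAttribute(metricArray[i], CUPTI_METRIC_ATTR_NAME,
                                        &nameSize, name) == CUPTI_SUCCESS)
                printf("metric %u: %s\n", (unsigned)metricArray[i], name);
        }
    }
    free(metricArray);
}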
447
+
448
+ /**
449
+ * \brief Get the number of metrics for a device.
450
+ *
451
+ * Returns the number of metrics available for a device.
452
+ *
453
+ * \param device The CUDA device
454
+ * \param numMetrics Returns the number of metrics available for the
455
+ * device
456
+ *
457
+ * \retval CUPTI_SUCCESS
458
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
459
+ * \retval CUPTI_ERROR_INVALID_DEVICE
460
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL
461
+ */
462
+ CUptiResult CUPTIAPI cuptiDeviceGetNumMetrics(CUdevice device,
463
+ uint32_t *numMetrics);
464
+
465
+ /**
466
+ * \brief Get the metrics for a device.
467
+ *
468
+ * Returns the metric IDs in \p metricArray for a device. The size of
469
+ * the \p metricArray buffer is given by \p *arraySizeBytes. The size
470
+ * of the \p metricArray buffer must be at least \p numMetrics *
471
+ * sizeof(CUpti_MetricID) or else not all metric IDs will be
472
+ * returned. The value returned in \p *arraySizeBytes contains the
473
+ * number of bytes returned in \p metricArray.
474
+ *
475
+ * \param device The CUDA device
476
+ * \param arraySizeBytes The size of \p metricArray in bytes, and
477
+ * returns the number of bytes written to \p metricArray
478
+ * \param metricArray Returns the IDs of the metrics for the device
479
+ *
480
+ * \retval CUPTI_SUCCESS
481
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
482
+ * \retval CUPTI_ERROR_INVALID_DEVICE
483
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
484
+ * \p metricArray are NULL
485
+ */
486
+ CUptiResult CUPTIAPI cuptiDeviceEnumMetrics(CUdevice device,
487
+ size_t *arraySizeBytes,
488
+ CUpti_MetricID *metricArray);
489
+
490
+ /**
491
+ * \brief Get a metric attribute.
492
+ *
493
+ * Returns a metric attribute in \p *value. The size of the \p
494
+ * value buffer is given by \p *valueSize. The value returned in \p
495
+ * *valueSize contains the number of bytes returned in \p value.
496
+ *
497
+ * If the attribute value is a c-string that is longer than \p
498
+ * *valueSize, then only the first \p *valueSize characters will be
499
+ * returned and there will be no terminating null byte.
500
+ *
501
+ * \param metric ID of the metric
502
+ * \param attrib The metric attribute to read
503
+ * \param valueSize The size of the \p value buffer in bytes, and
504
+ * returns the number of bytes written to \p value
505
+ * \param value Returns the attribute's value
506
+ *
507
+ * \retval CUPTI_SUCCESS
508
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
509
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
510
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
511
+ * is NULL, or if \p attrib is not a metric attribute
512
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
513
+ * attribute values, indicates that the \p value buffer is too small
514
+ * to hold the attribute value.
515
+ */
516
+ CUptiResult CUPTIAPI cuptiMetricGetAttribute(CUpti_MetricID metric,
517
+ CUpti_MetricAttribute attrib,
518
+ size_t *valueSize,
519
+ void *value);
520
+
521
+ /**
522
+ * \brief Find a metric by name.
523
+ *
524
+ * Find a metric by name and return the metric ID in \p *metric.
525
+ *
526
+ * \param device The CUDA device
527
+ * \param metricName The name of metric to find
528
+ * \param metric Returns the ID of the found metric or undefined if
529
+ * unable to find the metric
530
+ *
531
+ * \retval CUPTI_SUCCESS
532
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
533
+ * \retval CUPTI_ERROR_INVALID_DEVICE
534
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if unable to find a metric
535
+ * with name \p metricName. In this case \p *metric is undefined
536
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricName or \p
537
+ * metric are NULL.
538
+ */
539
+ CUptiResult CUPTIAPI cuptiMetricGetIdFromName(CUdevice device,
540
+ const char *metricName,
541
+ CUpti_MetricID *metric);
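
A hedged lookup sketch: resolving a metric ID from a name on device 0. The metric name "ipc" is only an example and must match a name reported by the enumeration APIs above; the CUDA driver-API initialization calls are assumptions for this fragment.

#include <cuda.h>
#include <cupti.h>

/* Illustrative sketch: resolve a metric ID by name on device 0. */
static CUptiResult getIpcMetricId(CUpti_MetricID *metricId)
{
    CUdevice device;
    cuInit(0);                       /* CUDA driver API initialization */
    cuDeviceGet(&device, 0);
    /* "ipc" is an example name; it must be a metric reported for this
       device by cuptiDeviceEnumMetrics. */
    return cuptiMetricGetIdFromName(device, "ipc", metricId);
}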
542
+
543
+ /**
544
+ * \brief Get number of events required to calculate a metric.
545
+ *
546
+ * Returns the number of events in \p numEvents that are required to
547
+ * calculate a metric.
548
+ *
549
+ * \param metric ID of the metric
550
+ * \param numEvents Returns the number of events required for the metric
551
+ *
552
+ * \retval CUPTI_SUCCESS
553
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
554
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
555
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
556
+ */
557
+ CUptiResult CUPTIAPI cuptiMetricGetNumEvents(CUpti_MetricID metric,
558
+ uint32_t *numEvents);
559
+
560
+ /**
561
+ * \brief Get the events required to calculate a metric.
562
+ *
563
+ * Gets the event IDs in \p eventIdArray required to calculate a \p
564
+ * metric. The size of the \p eventIdArray buffer is given by \p
565
+ * *eventIdArraySizeBytes and must be at least \p numEvents *
566
+ * sizeof(CUpti_EventID) or not all events will be returned. The value
567
+ * returned in \p *eventIdArraySizeBytes contains the number of bytes
568
+ * returned in \p eventIdArray.
569
+ *
570
+ * \param metric ID of the metric
571
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
572
+ * and returns the number of bytes written to \p eventIdArray
573
+ * \param eventIdArray Returns the IDs of the events required to
574
+ * calculate \p metric
575
+ *
576
+ * \retval CUPTI_SUCCESS
577
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
578
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
579
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArraySizeBytes or \p
580
+ * eventIdArray are NULL.
581
+ */
582
+ CUptiResult CUPTIAPI cuptiMetricEnumEvents(CUpti_MetricID metric,
583
+ size_t *eventIdArraySizeBytes,
584
+ CUpti_EventID *eventIdArray);
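
The two calls above are typically used together: the count sizes the buffer and the enumeration fills it. A small hedged sketch, assuming <cupti.h> as the include:

#include <stdlib.h>
#include <cupti.h>

/* Sketch: list the event IDs a metric is computed from. Buffer sizing
   follows the numEvents * sizeof(CUpti_EventID) rule described above. */
static CUpti_EventID *getMetricEvents(CUpti_MetricID metric, uint32_t *numEventsOut)
{
    uint32_t numEvents = 0;
    if (cuptiMetricGetNumEvents(metric, &numEvents) != CUPTI_SUCCESS)
        return NULL;

    size_t bytes = numEvents * sizeof(CUpti_EventID);
    CUpti_EventID *events = (CUpti_EventID *)malloc(bytes);
    if (cuptiMetricEnumEvents(metric, &bytes, events) != CUPTI_SUCCESS) {
        free(events);
        return NULL;
    }
    *numEventsOut = (uint32_t)(bytes / sizeof(CUpti_EventID));
    return events;  /* caller frees */
}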
585
+
586
+ /**
587
+ * \brief Get number of properties required to calculate a metric.
588
+ *
589
+ * Returns the number of properties in \p numProp that are required to
590
+ * calculate a metric.
591
+ *
592
+ * \param metric ID of the metric
593
+ * \param numProp Returns the number of properties required for the
594
+ * metric
595
+ *
596
+ * \retval CUPTI_SUCCESS
597
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
598
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
599
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numProp is NULL
600
+ */
601
+ CUptiResult CUPTIAPI cuptiMetricGetNumProperties(CUpti_MetricID metric,
602
+ uint32_t *numProp);
603
+
604
+ /**
605
+ * \brief Get the properties required to calculate a metric.
606
+ *
607
+ * Gets the property IDs in \p propIdArray required to calculate a \p
608
+ * metric. The size of the \p propIdArray buffer is given by \p
609
+ * *propIdArraySizeBytes and must be at least \p numProp *
610
+ * sizeof(CUpti_MetricPropertyID) or not all properties will be
611
+ * returned. The value returned in \p *propIdArraySizeBytes contains
612
+ * the number of bytes returned in \p propIdArray.
613
+ *
614
+ * \param metric ID of the metric
615
+ * \param propIdArraySizeBytes The size of \p propIdArray in bytes,
616
+ * and returns the number of bytes written to \p propIdArray
617
+ * \param propIdArray Returns the IDs of the properties required to
618
+ * calculate \p metric
619
+ *
620
+ * \retval CUPTI_SUCCESS
621
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
622
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
623
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p propIdArraySizeBytes or \p
624
+ * propIdArray are NULL.
625
+ */
626
+ CUptiResult CUPTIAPI cuptiMetricEnumProperties(CUpti_MetricID metric,
627
+ size_t *propIdArraySizeBytes,
628
+ CUpti_MetricPropertyID *propIdArray);
629
+
630
+
631
+ /**
632
+ * \brief For a metric get the groups of events that must be collected
633
+ * in the same pass.
634
+ *
635
+ * For a metric get the groups of events that must be collected in the
636
+ * same pass to ensure that the metric is calculated correctly. If the
637
+ * events are not collected as specified then the metric value may be
638
+ * inaccurate.
639
+ *
640
+ * The function returns NULL if a metric does not have any required
641
+ * event group. In this case the events needed for the metric can be
642
+ * grouped in any manner for collection.
643
+ *
644
+ * \param context The context for event collection
645
+ * \param metric The metric ID
646
+ * \param eventGroupSets Returns a CUpti_EventGroupSets object that
647
+ * indicates the events that must be collected in the same pass to
648
+ * ensure the metric is calculated correctly. Returns NULL if no
649
+ * grouping is required for metric
650
+ * \retval CUPTI_SUCCESS
651
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
652
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
653
+ */
654
+ CUptiResult CUPTIAPI cuptiMetricGetRequiredEventGroupSets(CUcontext context,
655
+ CUpti_MetricID metric,
656
+ CUpti_EventGroupSets **eventGroupSets);
657
+
658
+ /**
659
+ * \brief For a set of metrics, get the grouping that indicates the
660
+ * number of passes and the event groups necessary to collect the
661
+ * events required for those metrics.
662
+ *
663
+ * For a set of metrics, get the grouping that indicates the number of
664
+ * passes and the event groups necessary to collect the events
665
+ * required for those metrics.
666
+ *
667
+ * \see cuptiEventGroupSetsCreate for details on event group set
668
+ * creation.
669
+ *
670
+ * \param context The context for event collection
671
+ * \param metricIdArraySizeBytes Size of the metricIdArray in bytes
672
+ * \param metricIdArray Array of metric IDs
673
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
674
+ * indicates the number of passes required to collect the events and
675
+ * the events to collect on each pass
676
+ *
677
+ * \retval CUPTI_SUCCESS
678
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
679
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
680
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
681
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricIdArray or
682
+ * \p eventGroupPasses is NULL
683
+ */
684
+ CUptiResult CUPTIAPI cuptiMetricCreateEventGroupSets(CUcontext context,
685
+ size_t metricIdArraySizeBytes,
686
+ CUpti_MetricID *metricIdArray,
687
+ CUpti_EventGroupSets **eventGroupPasses);
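
To see how many replay passes a set of metrics needs, the returned CUpti_EventGroupSets object can be inspected. A hedged sketch; the numSets field is assumed to come from cupti_events.h and is not defined in this header.

#include <stdio.h>
#include <cupti.h>

/* Sketch: ask CUPTI how many passes are needed to collect the events
   behind a list of metrics. Assumes CUpti_EventGroupSets::numSets
   (defined in cupti_events.h). */
static void reportRequiredPasses(CUcontext ctx,
                                 CUpti_MetricID *metricIds,
                                 size_t numMetrics)
{
    CUpti_EventGroupSets *passes = NULL;
    if (cuptiMetricCreateEventGroupSets(ctx,
                                        numMetrics * sizeof(CUpti_MetricID),
                                        metricIds,
                                        &passes) == CUPTI_SUCCESS) {
        printf("collection requires %u pass(es)\n", passes->numSets);
    }
}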
688
+
689
+ /**
690
+ * \brief Calculate the value for a metric.
691
+ *
692
+ * Use the events collected for a metric to calculate the metric
693
+ * value. Metric value evaluation depends on the evaluation mode
694
+ * \ref CUpti_MetricEvaluationMode that the metric supports.
695
+ * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE,
696
+ * then it assumes that the input event value is for one domain instance.
697
+ * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE,
698
+ * it assumes that input event values are
699
+ * normalized to represent all domain instances on a device. For the
700
+ * most accurate metric collection, the events required for the metric
701
+ * should be collected for all profiled domain instances. For example,
702
+ * to collect all instances of an event, set the
703
+ * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on
704
+ * the group containing the event to 1. The normalized value for the
705
+ * event is then: (\p sum_event_values * \p totalInstanceCount) / \p
706
+ * instanceCount, where \p sum_event_values is the summation of the
707
+ * event values across all profiled domain instances, \p
708
+ * totalInstanceCount is obtained from querying
709
+ * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount
710
+ * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or
711
+ * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT).
712
+ *
713
+ * \param device The CUDA device that the metric is being calculated for
714
+ * \param metric The metric ID
715
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes
716
+ * \param eventIdArray The event IDs required to calculate \p metric
717
+ * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes
718
+ * \param eventValueArray The normalized event values required to
719
+ * calculate \p metric. The values must be ordered to match the order of
720
+ * events in \p eventIdArray
721
+ * \param timeDuration The duration over which the events were
722
+ * collected, in ns
723
+ * \param metricValue Returns the value for the metric
724
+ *
725
+ * \retval CUPTI_SUCCESS
726
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
727
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
728
+ * \retval CUPTI_ERROR_INVALID_OPERATION
729
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the
730
+ * eventIdArray does not contain all the events needed for metric
731
+ * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the
732
+ * event values required for the metric is CUPTI_EVENT_OVERFLOW
733
+ * \retval CUPTI_ERROR_INVALID_METRIC_VALUE if the computed metric value
734
+ * cannot be represented in the metric's value type. For example,
735
+ * if the metric value type is unsigned and the computed metric value is negative
736
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue,
737
+ * \p eventIdArray or \p eventValueArray is NULL
738
+ */
739
+ CUptiResult CUPTIAPI cuptiMetricGetValue(CUdevice device,
740
+ CUpti_MetricID metric,
741
+ size_t eventIdArraySizeBytes,
742
+ CUpti_EventID *eventIdArray,
743
+ size_t eventValueArraySizeBytes,
744
+ uint64_t *eventValueArray,
745
+ uint64_t timeDuration,
746
+ CUpti_MetricValue *metricValue);
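
Because CUpti_MetricValue is a union, the metric's value kind has to be read first (via CUPTI_METRIC_ATTR_VALUE_KIND) and the matching member accessed. A hedged sketch, assuming event values have already been collected and normalized as described above:

#include <stdio.h>
#include <cupti.h>

/* Sketch: compute a metric from already-collected, normalized event values
   and print it according to its value kind. */
static void printMetricValue(CUdevice device, CUpti_MetricID metric,
                             CUpti_EventID *eventIds, uint64_t *eventValues,
                             uint32_t numEvents, uint64_t kernelDurationNs)
{
    CUpti_MetricValue value;
    CUpti_MetricValueKind kind;
    size_t kindSize = sizeof(kind);

    if (cuptiMetricGetValue(device, metric,
                            numEvents * sizeof(CUpti_EventID), eventIds,
                            numEvents * sizeof(uint64_t), eventValues,
                            kernelDurationNs, &value) != CUPTI_SUCCESS)
        return;
    if (cuptiMetricGetAttribute(metric, CUPTI_METRIC_ATTR_VALUE_KIND,
                                &kindSize, &kind) != CUPTI_SUCCESS)
        return;

    switch (kind) {
    case CUPTI_METRIC_VALUE_KIND_DOUBLE:
        printf("%f\n", value.metricValueDouble); break;
    case CUPTI_METRIC_VALUE_KIND_UINT64:
        printf("%llu\n", (unsigned long long)value.metricValueUint64); break;
    case CUPTI_METRIC_VALUE_KIND_INT64:
        printf("%lld\n", (long long)value.metricValueInt64); break;
    case CUPTI_METRIC_VALUE_KIND_PERCENT:
        printf("%f%%\n", value.metricValuePercent); break;
    case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
        printf("%llu bytes/s\n", (unsigned long long)value.metricValueThroughput); break;
    case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
        printf("utilization level %d\n", (int)value.metricValueUtilizationLevel); break;
    default:
        break;
    }
}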
747
+
748
+ /**
749
+ * \brief Calculate the value for a metric.
750
+ *
751
+ * Use the events and properties collected for a metric to calculate
752
+ * the metric value. Metric value evaluation depends on the evaluation
753
+ * mode \ref CUpti_MetricEvaluationMode that the metric supports. If
754
+ * a metric has evaluation mode as
755
+ * CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE, then it assumes that the
756
+ * input event value is for one domain instance. If a metric has
757
+ * evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE, it
758
+ * assumes that input event values are normalized to represent all
759
+ * domain instances on a device. For the most accurate metric
760
+ * collection, the events required for the metric should be collected
761
+ * for all profiled domain instances. For example, to collect all
762
+ * instances of an event, set the
763
+ * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on
764
+ * the group containing the event to 1. The normalized value for the
765
+ * event is then: (\p sum_event_values * \p totalInstanceCount) / \p
766
+ * instanceCount, where \p sum_event_values is the summation of the
767
+ * event values across all profiled domain instances, \p
768
+ * totalInstanceCount is obtained from querying
769
+ * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount
770
+ * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or
771
+ * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT).
772
+ *
773
+ * \param metric The metric ID
774
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes
775
+ * \param eventIdArray The event IDs required to calculate \p metric
776
+ * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes
777
+ * \param eventValueArray The normalized event values required to
778
+ * calculate \p metric. The values must be ordered to match the order of
779
+ * events in \p eventIdArray
780
+ * \param propIdArraySizeBytes The size of \p propIdArray in bytes
781
+ * \param propIdArray The metric property IDs required to calculate \p metric
782
+ * \param propValueArraySizeBytes The size of \p propValueArray in bytes
783
+ * \param propValueArray The metric property values required to
784
+ * calculate \p metric. The values must be ordered to match the order of
785
+ * metric properties in \p propIdArray
786
+ * \param metricValue Returns the value for the metric
787
+ *
788
+ * \retval CUPTI_SUCCESS
789
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
790
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
791
+ * \retval CUPTI_ERROR_INVALID_OPERATION
792
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the
793
+ * eventIdArray does not contain all the events needed for metric
794
+ * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the
795
+ * event values required for the metric is CUPTI_EVENT_OVERFLOW
796
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if the computed metric value
797
+ * cannot be represented in the metric's value type. For example,
798
+ * if the metric value type is unsigned and the computed metric value is negative
799
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue,
800
+ * \p eventIdArray or \p eventValueArray is NULL
801
+ */
802
+ CUptiResult CUPTIAPI cuptiMetricGetValue2(CUpti_MetricID metric,
803
+ size_t eventIdArraySizeBytes,
804
+ CUpti_EventID *eventIdArray,
805
+ size_t eventValueArraySizeBytes,
806
+ uint64_t *eventValueArray,
807
+ size_t propIdArraySizeBytes,
808
+ CUpti_MetricPropertyID *propIdArray,
809
+ size_t propValueArraySizeBytes,
810
+ uint64_t *propValueArray,
811
+ CUpti_MetricValue *metricValue);
812
+
813
+ /** @} */ /* END CUPTI_METRIC_API */
814
+
815
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
816
+ #pragma GCC visibility pop
817
+ #endif
818
+
819
+ #if defined(__cplusplus)
820
+ }
821
+ #endif
822
+
823
+ #endif /*_CUPTI_METRIC_H_*/
824
+
825
+
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h ADDED
@@ -0,0 +1,111 @@
1
+ /*
2
+ * Copyright 2013-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
51
+ #pragma GCC visibility push(default)
52
+ #endif
53
+
54
+ typedef enum {
55
+ CUPTI_CBID_NVTX_INVALID = 0,
56
+ CUPTI_CBID_NVTX_nvtxMarkA = 1,
57
+ CUPTI_CBID_NVTX_nvtxMarkW = 2,
58
+ CUPTI_CBID_NVTX_nvtxMarkEx = 3,
59
+ CUPTI_CBID_NVTX_nvtxRangeStartA = 4,
60
+ CUPTI_CBID_NVTX_nvtxRangeStartW = 5,
61
+ CUPTI_CBID_NVTX_nvtxRangeStartEx = 6,
62
+ CUPTI_CBID_NVTX_nvtxRangeEnd = 7,
63
+ CUPTI_CBID_NVTX_nvtxRangePushA = 8,
64
+ CUPTI_CBID_NVTX_nvtxRangePushW = 9,
65
+ CUPTI_CBID_NVTX_nvtxRangePushEx = 10,
66
+ CUPTI_CBID_NVTX_nvtxRangePop = 11,
67
+ CUPTI_CBID_NVTX_nvtxNameCategoryA = 12,
68
+ CUPTI_CBID_NVTX_nvtxNameCategoryW = 13,
69
+ CUPTI_CBID_NVTX_nvtxNameOsThreadA = 14,
70
+ CUPTI_CBID_NVTX_nvtxNameOsThreadW = 15,
71
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceA = 16,
72
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceW = 17,
73
+ CUPTI_CBID_NVTX_nvtxNameCuContextA = 18,
74
+ CUPTI_CBID_NVTX_nvtxNameCuContextW = 19,
75
+ CUPTI_CBID_NVTX_nvtxNameCuStreamA = 20,
76
+ CUPTI_CBID_NVTX_nvtxNameCuStreamW = 21,
77
+ CUPTI_CBID_NVTX_nvtxNameCuEventA = 22,
78
+ CUPTI_CBID_NVTX_nvtxNameCuEventW = 23,
79
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceA = 24,
80
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceW = 25,
81
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamA = 26,
82
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamW = 27,
83
+ CUPTI_CBID_NVTX_nvtxNameCudaEventA = 28,
84
+ CUPTI_CBID_NVTX_nvtxNameCudaEventW = 29,
85
+ CUPTI_CBID_NVTX_nvtxDomainMarkEx = 30,
86
+ CUPTI_CBID_NVTX_nvtxDomainRangeStartEx = 31,
87
+ CUPTI_CBID_NVTX_nvtxDomainRangeEnd = 32,
88
+ CUPTI_CBID_NVTX_nvtxDomainRangePushEx = 33,
89
+ CUPTI_CBID_NVTX_nvtxDomainRangePop = 34,
90
+ CUPTI_CBID_NVTX_nvtxDomainResourceCreate = 35,
91
+ CUPTI_CBID_NVTX_nvtxDomainResourceDestroy = 36,
92
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryA = 37,
93
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryW = 38,
94
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringA = 39,
95
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringW = 40,
96
+ CUPTI_CBID_NVTX_nvtxDomainCreateA = 41,
97
+ CUPTI_CBID_NVTX_nvtxDomainCreateW = 42,
98
+ CUPTI_CBID_NVTX_nvtxDomainDestroy = 43,
99
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserCreate = 44,
100
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserDestroy = 45,
101
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireStart = 46,
102
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireFailed = 47,
103
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireSuccess = 48,
104
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserReleasing = 49,
105
+ CUPTI_CBID_NVTX_SIZE,
106
+ CUPTI_CBID_NVTX_FORCE_INT = 0x7fffffff
107
+ } CUpti_nvtx_api_trace_cbid;
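
These IDs only identify which NVTX entry point triggered a callback; the subscription itself goes through the CUPTI callback API (cuptiSubscribe, cuptiEnableDomain, and CUPTI_CB_DOMAIN_NVTX from cupti_callbacks.h, all assumed here since they are not declared in this file). A minimal, hedged sketch:

#include <stdio.h>
#include <cupti.h>

/* Sketch: react to nvtxRangePushA/nvtxRangePop calls. The generic callback
   signature and the NVTX domain come from cupti_callbacks.h (assumption). */
static void CUPTIAPI nvtxCallback(void *userdata, CUpti_CallbackDomain domain,
                                  CUpti_CallbackId cbid, const void *cbdata)
{
    (void)userdata; (void)cbdata;
    if (domain != CUPTI_CB_DOMAIN_NVTX)
        return;
    if (cbid == CUPTI_CBID_NVTX_nvtxRangePushA)
        printf("NVTX range pushed\n");
    else if (cbid == CUPTI_CBID_NVTX_nvtxRangePop)
        printf("NVTX range popped\n");
}

static void subscribeNvtx(void)
{
    CUpti_SubscriberHandle subscriber;
    cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)nvtxCallback, NULL);
    cuptiEnableDomain(1, subscriber, CUPTI_CB_DOMAIN_NVTX);
}

Note that, in practice, NVTX calls are only routed to CUPTI when the NVTX injection environment variable points at the CUPTI library; see the CUPTI documentation for the exact setup.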
108
+
109
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
110
+ #pragma GCC visibility pop
111
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h ADDED
@@ -0,0 +1,950 @@
1
+ /*
2
+ * Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PCSAMPLING_H_)
51
+ #define _CUPTI_PCSAMPLING_H_
52
+
53
+ #include <cuda.h>
54
+ #include <stdint.h>
55
+ #include <stddef.h>
56
+ #include "cupti_result.h"
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #define ACTIVITY_RECORD_ALIGNMENT 8
67
+ #if defined(_WIN32) // Windows 32- and 64-bit
68
+ #define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
69
+ #define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
70
+ #define END_PACKED_ALIGNMENT __pragma(pack(pop))
71
+ #elif defined(__GNUC__) // GCC
72
+ #define START_PACKED_ALIGNMENT
73
+ #define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
74
+ #define END_PACKED_ALIGNMENT
75
+ #else // all other compilers
76
+ #define START_PACKED_ALIGNMENT
77
+ #define PACKED_ALIGNMENT
78
+ #define END_PACKED_ALIGNMENT
79
+ #endif
80
+
81
+ #if defined(__cplusplus)
82
+ extern "C" {
83
+ #endif
84
+
85
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
86
+ #pragma GCC visibility push(default)
87
+ #endif
88
+
89
+ /**
90
+ * \defgroup CUPTI_PCSAMPLING_API CUPTI PC Sampling API
91
+ * Functions, types, and enums that implement the CUPTI PC Sampling API.
92
+ * @{
93
+ */
94
+
95
+ #ifndef CUPTI_PCSAMPLING_STRUCT_SIZE
96
+ #define CUPTI_PCSAMPLING_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
97
+ #endif
98
+
99
+ #ifndef CUPTI_STALL_REASON_STRING_SIZE
100
+ #define CUPTI_STALL_REASON_STRING_SIZE 128
101
+ #endif
102
+
103
+ /**
104
+ * \brief PC Sampling collection mode
105
+ */
106
+ typedef enum
107
+ {
108
+ /**
109
+ * INVALID Value
110
+ */
111
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_INVALID = 0,
112
+ /**
113
+ * Continuous mode. Kernels are not serialized in this mode.
114
+ */
115
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS = 1,
116
+ /**
117
+ * Serialized mode. Kernels are serialized in this mode.
118
+ */
119
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED = 2,
120
+ } CUpti_PCSamplingCollectionMode;
121
+
122
+ /**
123
+ * \brief PC Sampling stall reasons
124
+ */
125
+ typedef struct PACKED_ALIGNMENT
126
+ {
127
+ /**
128
+ * [r] Collected stall reason index
129
+ */
130
+ uint32_t pcSamplingStallReasonIndex;
131
+ /**
132
+ * [r] Number of times the PC was sampled with the stallReason.
133
+ */
134
+ uint32_t samples;
135
+ } CUpti_PCSamplingStallReason;
136
+
137
+ /**
138
+ * \brief PC Sampling data
139
+ */
140
+ typedef struct PACKED_ALIGNMENT
141
+ {
142
+ /**
143
+ * [w] Size of the data structure.
144
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
145
+ * available in the structure. Used to preserve backward compatibility.
146
+ */
147
+ size_t size;
148
+ /**
149
+ * [r] Unique cubin id
150
+ */
151
+ uint64_t cubinCrc;
152
+ /**
153
+ * [r] PC offset
154
+ */
155
+ uint64_t pcOffset;
156
+ /**
157
+ * The function's unique symbol index in the module.
158
+ */
159
+ uint32_t functionIndex;
160
+ /**
161
+ * Padding
162
+ */
163
+ uint32_t pad;
164
+ /**
165
+ * [r] The function name. This name string might be shared across all the records
166
+ * including records from activity APIs representing the same function, and so it should not be
167
+ * modified or freed until post processing of all the records is done. Once done, it is user’s responsibility to
168
+ * free the memory using free() function.
169
+ */
170
+ char* functionName;
171
+ /**
172
+ * [r] Collected stall reason count
173
+ */
174
+ size_t stallReasonCount;
175
+ /**
176
+ * [r] Stall reason id
177
+ * Total samples
178
+ */
179
+ CUpti_PCSamplingStallReason *stallReason;
180
+ } CUpti_PCSamplingPCData;
181
+
182
+ /**
183
+ * \brief PC Sampling output data format
184
+ */
185
+ typedef enum
186
+ {
187
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_INVALID = 0,
188
+ /**
189
+ * HW buffer data will be parsed during collection of data
190
+ */
191
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED = 1,
192
+ } CUpti_PCSamplingOutputDataFormat;
193
+
194
+ /**
195
+ * \brief Collected PC Sampling data
196
+ *
197
+ */
198
+ typedef struct PACKED_ALIGNMENT
199
+ {
200
+ /**
201
+ * [w] Size of the data structure.
202
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
203
+ * available in the structure. Used to preserve backward compatibility.
204
+ */
205
+ size_t size;
206
+ /**
207
+ * [w] Number of PCs to be collected
208
+ */
209
+ size_t collectNumPcs;
210
+ /**
211
+ * [r] Number of samples collected across all PCs.
212
+ * It includes samples for user modules, samples for non-user kernels and dropped samples.
213
+ * It includes counts for all non selected stall reasons.
214
+ * CUPTI does not provide PC records for non-user kernels.
215
+ * CUPTI does not provide PC records for instructions for which all selected stall reason metrics counts are zero.
216
+ */
217
+ uint64_t totalSamples;
218
+ /**
219
+ * [r] Number of samples that were dropped by hardware due to backpressure/overflow.
220
+ */
221
+ uint64_t droppedSamples;
222
+ /**
223
+ * [r] Number of PCs collected
224
+ */
225
+ size_t totalNumPcs;
226
+ /**
227
+ * [r] Number of PCs available for collection
228
+ */
229
+ size_t remainingNumPcs;
230
+ /**
231
+ * [r] Unique identifier for each range.
232
+ * Data collected across multiple ranges in multiple buffers can be identified using range id.
233
+ */
234
+ uint64_t rangeId;
235
+ /**
236
+ * [r] Profiled PC data
237
+ * This data struct should have enough memory to collect number of PCs mentioned in \brief collectNumPcs
238
+ */
239
+ CUpti_PCSamplingPCData *pPcData;
240
+ /**
241
+ * [r] Number of samples collected across all non-user kernel PCs.
242
+ * It includes samples for non-user kernels.
243
+ * It includes counts for all non selected stall reasons as well.
244
+ * CUPTI does not provide PC records for non-user kernels.
245
+ */
246
+ uint64_t nonUsrKernelsTotalSamples;
247
+
248
+ /**
249
+ * [r] Status of the hardware buffer.
250
+ * CUPTI returns the error code CUPTI_ERROR_OUT_OF_MEMORY when hardware buffer is full.
251
+ * When the hardware buffer is full, the user will get PC data as 0. To mitigate this issue, one or more of the options below can be tried:
252
+ * 1. Increase the hardware buffer size using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
253
+ * 2. Decrease the thread sleep span using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
254
+ * 3. Decrease the sampling frequency using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
255
+ */
256
+ uint8_t hardwareBufferFull;
257
+ } CUpti_PCSamplingData;
258
+
259
+ /**
260
+ * \brief PC Sampling configuration attributes
261
+ *
262
+ * PC Sampling configuration attribute types. These attributes can be read
263
+ * using \ref cuptiPCSamplingGetConfigurationAttribute and can be written
264
+ * using \ref cuptiPCSamplingSetConfigurationAttribute. Attributes marked
265
+ * [r] can only be read using \ref cuptiPCSamplingGetConfigurationAttribute
266
+ * [w] can only be written using \ref cuptiPCSamplingSetConfigurationAttribute
267
+ * [rw] can be read using \ref cuptiPCSamplingGetConfigurationAttribute and
268
+ * written using \ref cuptiPCSamplingSetConfigurationAttribute
269
+ */
270
+ typedef enum
271
+ {
272
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_INVALID = 0,
273
+ /**
274
+ * [rw] Sampling period for PC Sampling.
275
+ * DEFAULT - CUPTI defined value based on number of SMs
276
+ * Valid values for the sampling
277
+ * periods are between 5 to 31 both inclusive. This will set the
278
+ * sampling period to (2^samplingPeriod) cycles.
279
+ * For e.g. for sampling period = 5 to 31, cycles = 32, 64, 128,..., 2^31
280
+ * Value is a uint32_t
281
+ */
282
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD = 1,
283
+ /**
284
+ * [w] Number of stall reasons to collect.
285
+ * DEFAULT - All stall reasons will be collected
286
+ * Value is a size_t
287
+ * [w] Stall reasons to collect
288
+ * DEFAULT - All stall reasons will be collected
289
+ * Input value should be a pointer pointing to array of stall reason indexes
290
+ * containing all the stall reason indexes to collect.
291
+ */
292
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON = 2,
293
+ /**
294
+ * [rw] Size of SW buffer for raw PC counter data downloaded from HW buffer
295
+ * DEFAULT - 1 MB, which can accommodate approximately 5500 PCs
296
+ * with all stall reasons
297
+ * Approximately it takes 16 Bytes (and some fixed size memory)
298
+ * to accommodate one PC with one stall reason
299
+ * For e.g. 1 PC with 1 stall reason = 32 Bytes
300
+ * 1 PC with 2 stall reason = 48 Bytes
301
+ * 1 PC with 4 stall reason = 96 Bytes
302
+ * Value is a size_t
303
+ */
304
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE = 3,
305
+ /**
306
+ * [rw] Size of HW buffer in bytes
307
+ * DEFAULT - 512 MB
308
+ * If the sampling period is too small, the HW buffer can overflow
309
+ * and drop PC data
310
+ * Value is a size_t
311
+ */
312
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE = 4,
313
+ /**
314
+ * [rw] PC Sampling collection mode
315
+ * DEFAULT - CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS
316
+ * Input value should be of type \ref CUpti_PCSamplingCollectionMode.
317
+ */
318
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE = 5,
319
+ /**
320
+ * [rw] Control over PC Sampling data collection range
321
+ * Default - 0
322
+ * 1 - Allows user to start and stop PC Sampling using APIs -
323
+ * \ref cuptiPCSamplingStart() - Start PC Sampling
324
+ * \ref cuptiPCSamplingStop() - Stop PC Sampling
325
+ * Value is a uint32_t
326
+ */
327
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL = 6,
328
+ /**
329
+ * [w] Value for output data format
330
+ * Default - CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED
331
+ * Input value should be of type \ref CUpti_PCSamplingOutputDataFormat.
332
+ */
333
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT = 7,
334
+ /**
335
+ * [w] Data buffer to hold collected PC Sampling data PARSED_DATA
336
+ * Default - none.
337
+ * Buffer type is void * which can point to PARSED_DATA
338
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
339
+ */
340
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER = 8,
341
+ /**
342
+ * [rw] Control sleep time of the worker threads created by CUPTI for various PC sampling operations.
343
+ * CUPTI creates multiple worker threads to offload certain operations to these threads. This includes decoding of HW data to
344
+ * the CUPTI PC sampling data and correlating PC data to SASS instructions. CUPTI wakes up these threads periodically.
345
+ * Default - 100 milliseconds.
346
+ * Value is a uint32_t
347
+ */
348
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN = 9,
349
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_FORCE_INT = 0x7fffffff,
350
+ } CUpti_PCSamplingConfigurationAttributeType;
351
+
352
+ /**
353
+ * \brief PC sampling configuration information structure
354
+ *
355
+ * This structure provides \ref CUpti_PCSamplingConfigurationAttributeType which can be configured
356
+ * or queried for PC sampling configuration
357
+ */
358
+ typedef struct
359
+ {
360
+ /**
361
+ * Refer \ref CUpti_PCSamplingConfigurationAttributeType for all supported attribute types
362
+ */
363
+ CUpti_PCSamplingConfigurationAttributeType attributeType;
364
+ /*
365
+ * Configure or query status for \p attributeType
366
+ * CUPTI_SUCCESS for valid \p attributeType and \p attributeData
367
+ * CUPTI_ERROR_INVALID_OPERATION if \p attributeData is not valid
368
+ * CUPTI_ERROR_INVALID_PARAMETER if \p attributeType is not valid
369
+ */
370
+ CUptiResult attributeStatus;
371
+ union
372
+ {
373
+ /**
374
+ * Invalid Value
375
+ */
376
+ struct
377
+ {
378
+ uint64_t data[3];
379
+ } invalidData;
380
+ /**
381
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
382
+ */
383
+ struct
384
+ {
385
+ uint32_t samplingPeriod;
386
+ } samplingPeriodData;
387
+ /**
388
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON
389
+ */
390
+ struct
391
+ {
392
+ size_t stallReasonCount;
393
+ uint32_t *pStallReasonIndex;
394
+ } stallReasonData;
395
+ /**
396
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE
397
+ */
398
+ struct
399
+ {
400
+ size_t scratchBufferSize;
401
+ } scratchBufferSizeData;
402
+ /**
403
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
404
+ */
405
+ struct
406
+ {
407
+ size_t hardwareBufferSize;
408
+ } hardwareBufferSizeData;
409
+ /**
410
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE
411
+ */
412
+ struct
413
+ {
414
+ CUpti_PCSamplingCollectionMode collectionMode;
415
+ } collectionModeData;
416
+ /**
417
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
418
+ */
419
+ struct
420
+ {
421
+ uint32_t enableStartStopControl;
422
+ } enableStartStopControlData;
423
+ /**
424
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT
425
+ */
426
+ struct
427
+ {
428
+ CUpti_PCSamplingOutputDataFormat outputDataFormat;
429
+ } outputDataFormatData;
430
+ /**
431
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER
432
+ */
433
+ struct
434
+ {
435
+ void *samplingDataBuffer;
436
+ } samplingDataBufferData;
437
+ /**
438
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
439
+ */
440
+ struct
441
+ {
442
+ uint32_t workerThreadPeriodicSleepSpan;
443
+ } workerThreadPeriodicSleepSpanData;
444
+
445
+ } attributeData;
446
+ } CUpti_PCSamplingConfigurationInfo;
447
+
448
+ /**
449
+ * \brief PC sampling configuration structure
450
+ *
451
+ * This structure configures PC sampling using \ref cuptiPCSamplingSetConfigurationAttribute
452
+ * and queries PC sampling default configuration using \ref cuptiPCSamplingGetConfigurationAttribute
453
+ */
454
+ typedef struct
455
+ {
456
+ /**
457
+ * [w] Size of the data structure i.e. CUpti_PCSamplingConfigurationInfoParamsSize
458
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
459
+ * available in the structure. Used to preserve backward compatibility.
460
+ */
461
+ size_t size;
462
+ /**
463
+ * [w] Assign to NULL
464
+ */
465
+ void* pPriv;
466
+ /**
467
+ * [w] CUcontext
468
+ */
469
+ CUcontext ctx;
470
+ /**
471
+ * [w] Number of attributes to configure using \ref cuptiPCSamplingSetConfigurationAttribute or query
472
+ * using \ref cuptiPCSamplingGetConfigurationAttribute
473
+ */
474
+ size_t numAttributes;
475
+ /**
476
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
477
+ */
478
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
479
+ } CUpti_PCSamplingConfigurationInfoParams;
480
+ #define CUpti_PCSamplingConfigurationInfoParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingConfigurationInfoParams,pPCSamplingConfigurationInfo)
481
+
482
+ /**
483
+ * \brief Write PC Sampling configuration attribute.
484
+ *
485
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
486
+ * containing PC sampling configuration.
487
+ *
488
+ * \retval CUPTI_SUCCESS
489
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
490
+ * some invalid \p attrib.
491
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if attribute \p value is not valid
492
+ * or any \p pParams is not valid
493
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
494
+ * does not support the API
495
+ */
496
+ CUptiResult CUPTIAPI cuptiPCSamplingSetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
497
+
498
+ /**
499
+ * \brief Read PC Sampling configuration attribute.
500
+ *
501
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
502
+ * containing PC sampling configuration.
503
+ *
504
+ * \retval CUPTI_SUCCESS
505
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
506
+ * some invalid attribute.
507
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p attrib is not valid
508
+ * or any \p pParams is not valid
509
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT indicates that
510
+ * the \p value buffer is too small to hold the attribute value
511
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
512
+ * does not support the API
513
+ */
514
+ CUptiResult CUPTIAPI cuptiPCSamplingGetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
515
+
516
+ /**
517
+ * \brief Params for cuptiPCSamplingEnable
518
+ */
519
+ typedef struct
520
+ {
521
+ /**
522
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetDataParamsSize
523
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
524
+ * available in the structure. Used to preserve backward compatibility.
525
+ */
526
+ size_t size;
527
+ /**
528
+ * [w] Assign to NULL
529
+ */
530
+ void* pPriv;
531
+ /**
532
+ * [w] CUcontext
533
+ */
534
+ CUcontext ctx;
535
+ /**
536
+ * \param pcSamplingData Data buffer to hold collected PC Sampling data PARSED_DATA
537
+ * Buffer type is void * which can point to PARSED_DATA
538
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
539
+ */
540
+ void *pcSamplingData;
541
+ } CUpti_PCSamplingGetDataParams;
542
+ #define CUpti_PCSamplingGetDataParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetDataParams, pcSamplingData)
543
+ /**
544
+ * \brief Flush GPU PC sampling data periodically.
545
+ *
546
+ * Flushing of GPU PC Sampling data is required at the following points to maintain uniqueness of PCs:
547
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, after every module load-unload-load
548
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED, after every kernel ends
549
+ * If configuration option \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
550
+ * is enabled, then after every range end i.e. \brief cuptiPCSamplingStop()
551
+ *
552
+ * If application is profiled in \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, with disabled
553
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL, and there is no module unload,
554
+ * user can collect data in two ways:
555
+ * Use \brief cuptiPCSamplingGetData() API periodically
556
+ * Use \brief cuptiPCSamplingDisable() on application exit and read GPU PC sampling data from sampling
557
+ * data buffer passed during configuration.
558
+ * Note: In case, \brief cuptiPCSamplingGetData() API is not called periodically, then sampling data buffer
559
+ * passed during configuration should be large enough to hold all PCs data.
560
+ * \brief cuptiPCSamplingGetData() API never does device synchronization.
561
+ * It is possible that when the API is called there is some unconsumed data from the HW buffer. In this case
562
+ * CUPTI provides only the data available with it at that moment.
563
+ *
564
+ * \param Refer \ref CUpti_PCSamplingGetDataParams
565
+ *
566
+ * \retval CUPTI_SUCCESS
567
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called without
568
+ * enabling PC sampling.
569
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
570
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
571
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY indicates that the HW buffer is full
572
+ * does not support the API
573
+ */
574
+ CUptiResult CUPTIAPI cuptiPCSamplingGetData(CUpti_PCSamplingGetDataParams *pParams);
575
+
576
+ /**
577
+ * \brief Params for cuptiPCSamplingEnable
578
+ */
579
+ typedef struct
580
+ {
581
+ /**
582
+ * [w] Size of the data structure i.e. CUpti_PCSamplingEnableParamsSize
583
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
584
+ * available in the structure. Used to preserve backward compatibility.
585
+ */
586
+ size_t size;
587
+ /**
588
+ * [w] Assign to NULL
589
+ */
590
+ void* pPriv;
591
+ /**
592
+ * [w] CUcontext
593
+ */
594
+ CUcontext ctx;
595
+ } CUpti_PCSamplingEnableParams;
596
+ #define CUpti_PCSamplingEnableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingEnableParams, ctx)
597
+
598
+ /**
599
+ * \brief Enable PC sampling.
600
+ *
601
+ * \param Refer \ref CUpti_PCSamplingEnableParams
602
+ *
603
+ * \retval CUPTI_SUCCESS
604
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
605
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
606
+ * does not support the API
607
+ */
608
+ CUptiResult CUPTIAPI cuptiPCSamplingEnable(CUpti_PCSamplingEnableParams *pParams);
609
+
610
+ /**
611
+ * \brief Params for cuptiPCSamplingDisable
612
+ */
613
+ typedef struct
614
+ {
615
+ /**
616
+ * [w] Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
617
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
618
+ * available in the structure. Used to preserve backward compatibility.
619
+ */
620
+ size_t size;
621
+ /**
622
+ * [w] Assign to NULL
623
+ */
624
+ void* pPriv;
625
+ /**
626
+ * [w] CUcontext
627
+ */
628
+ CUcontext ctx;
629
+ } CUpti_PCSamplingDisableParams;
630
+ #define CUpti_PCSamplingDisableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingDisableParams, ctx)
631
+
632
+ /**
633
+ * \brief Disable PC sampling.
634
+ *
635
+ * For application which doesn't destroy the CUDA context explicitly,
636
+ * this API does the PC Sampling tear-down, joins threads and copies PC records in the buffer provided
637
+ * during the PC sampling configuration. PC records which can't be accommodated in the buffer are discarded.
638
+ *
639
+ * \param Refer \ref CUpti_PCSamplingDisableParams
640
+ *
641
+ * \retval CUPTI_SUCCESS
642
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
643
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
644
+ * does not support the API
645
+ */
646
+ CUptiResult CUPTIAPI cuptiPCSamplingDisable(CUpti_PCSamplingDisableParams *pParams);
+
+ /**
+ * \brief Params for cuptiPCSamplingStart
+ */
+ typedef struct
+ {
+ /**
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStartParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * [w] Assign to NULL
+ */
+ void* pPriv;
+ /**
+ * [w] CUcontext
+ */
+ CUcontext ctx;
+ } CUpti_PCSamplingStartParams;
+ #define CUpti_PCSamplingStartParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStartParams, ctx)
+
+ /**
+ * \brief Start PC sampling.
+ *
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
+ * This API can be used to mark the start of the range. Set configuration option
+ * \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
+ *
+ * \param Refer \ref CUpti_PCSamplingStartParams
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
+ * incorrect PC Sampling configuration.
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
+ * does not support the API
+ */
+ CUptiResult CUPTIAPI cuptiPCSamplingStart(CUpti_PCSamplingStartParams *pParams);
+
+ /**
+ * \brief Params for cuptiPCSamplingStop
+ */
+ typedef struct
+ {
+ /**
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStopParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * [w] Assign to NULL
+ */
+ void* pPriv;
+ /**
+ * [w] CUcontext
+ */
+ CUcontext ctx;
+ } CUpti_PCSamplingStopParams;
+ #define CUpti_PCSamplingStopParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStopParams, ctx)
+
+ /**
+ * \brief Stop PC sampling.
+ *
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
+ * This API can be used to mark the end of the range. Set configuration option
+ * \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
+ *
+ * \param Refer \ref CUpti_PCSamplingStopParams
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
+ * incorrect PC Sampling configuration.
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
+ * does not support the API
+ */
+ CUptiResult CUPTIAPI cuptiPCSamplingStop(CUpti_PCSamplingStopParams *pParams);
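A sketch of range-scoped collection with the Start/Stop pair, assuming sampling was already enabled and configured with CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL; ctx is the context passed to cuptiPCSamplingEnable.

    // Sketch: only the work between Start and Stop is sampled.
    static void SampleRange(CUcontext ctx)
    {
        CUpti_PCSamplingStartParams startParams = {};
        startParams.size = CUpti_PCSamplingStartParamsSize;
        startParams.ctx = ctx;
        cuptiPCSamplingStart(&startParams);

        // ... launch the kernels that make up the range of interest ...

        CUpti_PCSamplingStopParams stopParams = {};
        stopParams.size = CUpti_PCSamplingStopParamsSize;
        stopParams.ctx = ctx;
        cuptiPCSamplingStop(&stopParams);
    }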
+
+ /**
+ * \brief Params for cuptiPCSamplingGetNumStallReasons
+ */
+ typedef struct
+ {
+ /**
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetNumStallReasonsParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * [w] Assign to NULL
+ */
+ void* pPriv;
+ /**
+ * [w] CUcontext
+ */
+ CUcontext ctx;
+ /**
+ * [r] Number of stall reasons
+ */
+ size_t *numStallReasons;
+ } CUpti_PCSamplingGetNumStallReasonsParams;
+ #define CUpti_PCSamplingGetNumStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetNumStallReasonsParams, numStallReasons)
+
+ /**
+ * \brief Get PC sampling stall reason count.
+ *
+ * \param Refer \ref CUpti_PCSamplingGetNumStallReasonsParams
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
+ * does not support the API
+ */
+ CUptiResult CUPTIAPI cuptiPCSamplingGetNumStallReasons(CUpti_PCSamplingGetNumStallReasonsParams *pParams);
+
+ /**
+ * \brief Params for cuptiPCSamplingGetStallReasons
+ */
+ typedef struct
+ {
+ /**
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetStallReasonsParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * [w] Assign to NULL
+ */
+ void* pPriv;
+ /**
+ * [w] CUcontext
+ */
+ CUcontext ctx;
+ /**
+ * [w] Number of stall reasons
+ */
+ size_t numStallReasons;
+ /**
+ * [r] Stall reason index
+ */
+ uint32_t *stallReasonIndex;
+ /**
+ * [r] Stall reason names
+ */
+ char **stallReasons;
+ } CUpti_PCSamplingGetStallReasonsParams;
+ #define CUpti_PCSamplingGetStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetStallReasonsParams, stallReasons)
+
+ /**
+ * \brief Get PC sampling stall reasons.
+ *
+ * \param Refer \ref CUpti_PCSamplingGetStallReasonsParams
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
+ * does not support the API
+ */
+ CUptiResult CUPTIAPI cuptiPCSamplingGetStallReasons(CUpti_PCSamplingGetStallReasonsParams *pParams);
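The two calls above form a query-then-fetch pair: first ask how many stall reasons the device exposes, then allocate arrays and fetch their indexes and names. A sketch (standard headers omitted; the per-name buffer size is an assumption):

    // Sketch: enumerate the stall reasons available on the device behind ctx.
    static void ListStallReasons(CUcontext ctx)
    {
        size_t numStallReasons = 0;
        CUpti_PCSamplingGetNumStallReasonsParams numParams = {};
        numParams.size = CUpti_PCSamplingGetNumStallReasonsParamsSize;
        numParams.ctx = ctx;
        numParams.numStallReasons = &numStallReasons;
        cuptiPCSamplingGetNumStallReasons(&numParams);

        std::vector<uint32_t> indexes(numStallReasons);
        std::vector<char*> names(numStallReasons);
        for (auto &name : names)
            name = (char*)calloc(128, 1);                 // assumption: 128 bytes per name is enough

        CUpti_PCSamplingGetStallReasonsParams srParams = {};
        srParams.size = CUpti_PCSamplingGetStallReasonsParamsSize;
        srParams.ctx = ctx;
        srParams.numStallReasons = numStallReasons;
        srParams.stallReasonIndex = indexes.data();
        srParams.stallReasons = names.data();
        cuptiPCSamplingGetStallReasons(&srParams);
    }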
+
+ /**
+ * \brief Params for cuptiGetSassToSourceCorrelation
+ */
+ typedef struct {
+ /**
+ * [w] Size of the data structure i.e. CUpti_GetSassToSourceCorrelationParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * [w] Pointer to cubin binary where function belongs.
+ */
+ const void* cubin;
+ /**
+ * [w] Function name to which PC belongs.
+ */
+ const char *functionName;
+ /**
+ * [w] Size of cubin binary.
+ */
+ size_t cubinSize;
+ /**
+ * [r] Line number in the source code.
+ */
+ uint32_t lineNumber;
+ /**
+ * [w] PC offset
+ */
+ uint64_t pcOffset;
+ /**
+ * [r] Path for the source file.
+ */
+ char *fileName;
+ /**
+ * [r] Path for the directory of source file.
+ */
+ char *dirName;
+ } CUpti_GetSassToSourceCorrelationParams;
+ #define CUpti_GetSassToSourceCorrelationParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetSassToSourceCorrelationParams, dirName)
+
+ /**
+ * \brief SASS to Source correlation.
+ *
+ * \param Refer \ref CUpti_GetSassToSourceCorrelationParams
+ *
+ * The user is expected to free the allocated memory for fileName and dirName after use.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if either of the parameters cubin or functionName
+ * is NULL or cubinSize is zero or size field is not set correctly.
+ * \retval CUPTI_ERROR_INVALID_MODULE provided cubin is invalid.
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred.
+ * This error code is also used for cases when the function is not present in the module.
+ * A better error code will be returned in the future release.
+ */
+ CUptiResult CUPTIAPI cuptiGetSassToSourceCorrelation(CUpti_GetSassToSourceCorrelationParams *pParams);
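A sketch of mapping a sampled PC offset back to a source location; cubinImage, cubinBytes, "myKernel" and pcOffset stand in for values the caller already has (for example from the module load callback and a PC sampling record).

    // Sketch: translate (cubin, function, pcOffset) into file/line.
    CUpti_GetSassToSourceCorrelationParams corrParams = {};
    corrParams.size = CUpti_GetSassToSourceCorrelationParamsSize;
    corrParams.cubin = cubinImage;                 // placeholder: cubin image pointer
    corrParams.cubinSize = cubinBytes;             // placeholder: cubin size in bytes
    corrParams.functionName = "myKernel";          // placeholder: function name from the record
    corrParams.pcOffset = pcOffset;                // placeholder: PC offset from a sampling record
    if (cuptiGetSassToSourceCorrelation(&corrParams) == CUPTI_SUCCESS)
    {
        printf("%s/%s:%u\n", corrParams.dirName, corrParams.fileName, corrParams.lineNumber);
        free(corrParams.fileName);                 // the caller frees both output strings
        free(corrParams.dirName);
    }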
+
+ /**
+ * \brief Params for cuptiGetCubinCrc
+ */
+ typedef struct {
+ /**
+ * [w] Size of configuration structure.
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * [w] Size of cubin binary.
+ */
+ size_t cubinSize;
+ /**
+ * [w] Pointer to cubin binary
+ */
+ const void* cubin;
+ /**
+ * [r] Computed CRC will be stored in it.
+ */
+ uint64_t cubinCrc;
+ } CUpti_GetCubinCrcParams;
+ #define CUpti_GetCubinCrcParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetCubinCrcParams, cubinCrc)
+
+ /**
+ * \brief Get the CRC of cubin.
+ *
+ * This function returns the CRC of the provided cubin binary.
+ *
+ * \param Refer \ref CUpti_GetCubinCrcParams
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if parameter cubin is NULL or
+ * provided cubinSize is zero or size field is not set.
+ */
+ CUptiResult CUPTIAPI cuptiGetCubinCrc(CUpti_GetCubinCrcParams *pParams);
+
+ /**
+ * \brief Function type for callback used by CUPTI to request the CRC of
+ * a loaded module.
+ *
+ * This callback function asks for the CRC of the provided module.
+ * The provided CRC will be stored in PC sampling records i.e. in the field 'cubinCrc' of the PC sampling
+ * struct CUpti_PCSamplingPCData. The CRC is used during offline source correlation to uniquely identify the module.
+ *
+ * \param cubin The pointer to cubin binary
+ * \param cubinSize The size of cubin binary.
+ * \param cubinCrc Returns the computed CRC of the cubin.
+ */
+ typedef void (CUPTIAPI *CUpti_ComputeCrcCallbackFunc)(
+ const void* cubin,
+ size_t cubinSize,
+ uint64_t *cubinCrc);
+
+ /**
+ * \brief Register a callback function with CUPTI to use
+ * your own algorithm to compute the cubin CRC.
+ *
+ * This function registers a callback function that CUPTI calls
+ * when a CUDA module is loaded.
+ *
+ * \param funcComputeCubinCrc The callback that is invoked when a CUDA module
+ * is loaded.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p funcComputeCubinCrc is NULL.
+ */
+ CUptiResult CUPTIAPI cuptiRegisterComputeCrcCallback(CUpti_ComputeCrcCallbackFunc funcComputeCubinCrc);
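A sketch combining the two CRC entry points: cuptiGetCubinCrc computes CUPTI's own CRC for a cubin, while cuptiRegisterComputeCrcCallback lets the client substitute its own routine so that online records and offline correlation agree. MyCrc64 is a hypothetical checksum helper owned by the client.

    // Sketch: a client-supplied CRC routine, invoked by CUPTI on every module load.
    static void CUPTIAPI ComputeCrc(const void *cubin, size_t cubinSize, uint64_t *cubinCrc)
    {
        *cubinCrc = MyCrc64(cubin, cubinSize);         // MyCrc64: hypothetical checksum helper
    }

    static void CrcExamples(const void *cubinImage, size_t cubinBytes)
    {
        // Either let CUPTI compute the CRC of a cubin directly ...
        CUpti_GetCubinCrcParams crcParams = {};
        crcParams.size = CUpti_GetCubinCrcParamsSize;
        crcParams.cubin = cubinImage;
        crcParams.cubinSize = cubinBytes;
        cuptiGetCubinCrc(&crcParams);                  // crcParams.cubinCrc now holds the CRC

        // ... or register the custom routine so CUPTI uses it for the records it emits.
        cuptiRegisterComputeCrcCallback(ComputeCrc);
    }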
+
+ /** @} */ /* END CUPTI_PCSAMPLING_API */
+
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
+ #pragma GCC visibility pop
+ #endif
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif /*_CUPTI_PCSAMPLING_H_*/
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h ADDED
@@ -0,0 +1,419 @@
+ #if !defined(_CUPTI_PCSAMPLING_UTIL_H_)
+ #define _CUPTI_PCSAMPLING_UTIL_H_
+
+ #include <cupti_pcsampling.h>
+ #include <fstream>
+
+ #ifndef CUPTIUTILAPI
+ #ifdef _WIN32
+ #define CUPTIUTILAPI __stdcall
+ #else
+ #define CUPTIUTILAPI
+ #endif
+ #endif
+
+ #define ACTIVITY_RECORD_ALIGNMENT 8
+ #if defined(_WIN32) // Windows 32- and 64-bit
+ #define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
+ #define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
+ #define END_PACKED_ALIGNMENT __pragma(pack(pop))
+ #elif defined(__GNUC__) // GCC
+ #define START_PACKED_ALIGNMENT
+ #define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
+ #define END_PACKED_ALIGNMENT
+ #else // all other compilers
+ #define START_PACKED_ALIGNMENT
+ #define PACKED_ALIGNMENT
+ #define END_PACKED_ALIGNMENT
+ #endif
+
+ #ifndef CUPTI_UTIL_STRUCT_SIZE
+ #define CUPTI_UTIL_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
+ #endif
+
+ #ifndef CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS
+ #define CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS(type, member, structSize) \
+ (offsetof(type, member) < structSize)
+ #endif
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ #if defined(__GNUC__)
+ #pragma GCC visibility push(default)
+ #endif
+
+ namespace CUPTI { namespace PcSamplingUtil {
+
+ /**
+ * \defgroup CUPTI_PCSAMPLING_UTILITY CUPTI PC Sampling Utility API
+ * Functions, types, and enums that implement the CUPTI PC Sampling Utility API.
+ * @{
+ */
+
+ /**
+ * \brief Header info will be stored in file.
+ */
+ typedef struct PACKED_ALIGNMENT {
+ /**
+ * Version of file format.
+ */
+ uint32_t version;
+ /**
+ * Total number of buffers present in the file.
+ */
+ uint32_t totalBuffers;
+ } Header;
+
+ /**
+ * \brief BufferInfo will be stored in the file for every buffer
+ * i.e. for every call of UtilDumpPcSamplingBufferInFile() API.
+ */
+ typedef struct PACKED_ALIGNMENT {
+ /**
+ * Total number of PC records.
+ */
+ uint64_t recordCount;
+ /**
+ * Count of all stall reasons supported on the GPU
+ */
+ size_t numStallReasons;
+ /**
+ * Total number of stall reasons in single record.
+ */
+ uint64_t numSelectedStallReasons;
+ /**
+ * Buffer size in Bytes.
+ */
+ uint64_t bufferByteSize;
+ } BufferInfo;
+
+ /**
+ * \brief All available stall reason names and respective indexes
+ * will be stored in it.
+ */
+ typedef struct PACKED_ALIGNMENT {
+ /**
+ * Number of all available stall reasons
+ */
+ size_t numStallReasons;
+ /**
+ * Stall reason names of all available stall reasons
+ */
+ char **stallReasons;
+ /**
+ * Stall reason index of all available stall reasons
+ */
+ uint32_t *stallReasonIndex;
+ } PcSamplingStallReasons;
+
+ typedef enum {
+ /**
+ * Invalid buffer type.
+ */
+ PC_SAMPLING_BUFFER_INVALID = 0,
+ /**
+ * Refers to CUpti_PCSamplingData buffer.
+ */
+ PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA = 1
+ } PcSamplingBufferType;
+
+ /**
+ * \brief CUPTI PC sampling utility API result codes.
+ *
+ * Error and result codes returned by CUPTI PC sampling utility API.
+ */
+ typedef enum {
+ /**
+ * No error
+ */
+ CUPTI_UTIL_SUCCESS = 0,
+ /**
+ * One or more of the parameters are invalid.
+ */
+ CUPTI_UTIL_ERROR_INVALID_PARAMETER = 1,
+ /**
+ * Unable to create a new file
+ */
+ CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE = 2,
+ /**
+ * Unable to open a file
+ */
+ CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE = 3,
+ /**
+ * Read or write operation failed
+ */
+ CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED = 4,
+ /**
+ * Provided file handle is corrupted.
+ */
+ CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED = 5,
+ /**
+ * Seek operation failed.
+ */
+ CUPTI_UTIL_ERROR_SEEK_OPERATION_FAILED = 6,
+ /**
+ * Unable to allocate enough memory to perform the requested
+ * operation.
+ */
+ CUPTI_UTIL_ERROR_OUT_OF_MEMORY = 7,
+ /**
+ * An unknown internal error has occurred.
+ */
+ CUPTI_UTIL_ERROR_UNKNOWN = 999,
+ CUPTI_UTIL_ERROR_FORCE_INT = 0x7fffffff
+ } CUptiUtilResult;
+
+ /**
+ * \brief Params for \ref CuptiUtilPutPcSampData
+ */
+ typedef struct {
+ /**
+ * Size of the data structure i.e. CUptiUtil_PutPcSampDataParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * Type of buffer to store in file
+ */
+ PcSamplingBufferType bufferType;
+ /**
+ * PC sampling buffer.
+ */
+ void *pSamplingData;
+ /**
+ * Number of configured attributes
+ */
+ size_t numAttributes;
+ /**
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
+ * It is expected to provide configuration details of at least
+ * CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON attribute.
+ */
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
+ /**
+ * Refer \ref PcSamplingStallReasons.
+ */
+ PcSamplingStallReasons *pPcSamplingStallReasons;
+ /**
+ * File name to store the buffer into.
+ */
+ const char* fileName;
+ } CUptiUtil_PutPcSampDataParams;
+ #define CUptiUtil_PutPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_PutPcSampDataParams, fileName)
+
+ /**
+ * \brief Dump PC sampling data into the file.
+ *
+ * This API can be called multiple times.
+ * It will append the buffer to the file.
+ * For every buffer it stores a BufferInfo record, which helps to allocate
+ * a correctly sized buffer before retrieving the data.
+ * This API creates the file if it does not exist.
+ * If the stallReasonIndex or stallReasons pointer of \ref CUptiUtil_PutPcSampDataParams is NULL
+ * then stall reason data will not be stored in the file.
+ * It is expected to store all available stall reason data at least once so that it can be
+ * referred to during offline correlation.
+ *
+ * \retval CUPTI_UTIL_SUCCESS
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
+ * or if either of pSamplingData, pParams pointer is NULL or stall reason configuration details not provided
+ * or filename is empty.
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
+ */
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilPutPcSampData(CUptiUtil_PutPcSampDataParams *pParams);
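A sketch of dumping one collected buffer to a file with the utility above; samplingData, configInfo (carrying at least the STALL_REASON attribute) and stallReasonsInfo are assumed to have been filled in during configuration. The utility lives in the CUPTI::PcSamplingUtil namespace.

    using namespace CUPTI::PcSamplingUtil;

    // Sketch: append one PC sampling buffer (plus its metadata) to a file.
    CUptiUtil_PutPcSampDataParams putParams = {};
    putParams.size = CUptiUtil_PutPcSampDataParamsSize;
    putParams.bufferType = PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA;
    putParams.pSamplingData = &samplingData;               // CUpti_PCSamplingData filled by CUPTI
    putParams.numAttributes = 1;
    putParams.pPCSamplingConfigurationInfo = &configInfo;  // includes the stall reason configuration
    putParams.pPcSamplingStallReasons = &stallReasonsInfo; // names/indexes from cuptiPCSamplingGetStallReasons
    putParams.fileName = "pcsampling.dat";
    CuptiUtilPutPcSampData(&putParams);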
+
+ /**
+ * \brief Params for \ref CuptiUtilGetHeaderData
+ */
+ typedef struct {
+ /**
+ * Size of the data structure i.e. CUptiUtil_GetHeaderDataParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * File handle.
+ */
+ std::ifstream *fileHandler;
+ /**
+ * Header Info.
+ */
+ Header headerInfo;
+
+ } CUptiUtil_GetHeaderDataParams;
+ #define CUptiUtil_GetHeaderDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetHeaderDataParams, headerInfo)
+
+ /**
+ * \brief Get header data of file.
+ *
+ * This API must be called once initially while retrieving data from the file.
+ * The \ref Header structure gives info about the total number
+ * of buffers present in the file.
+ *
+ * \retval CUPTI_UTIL_SUCCESS
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
+ */
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetHeaderData(CUptiUtil_GetHeaderDataParams *pParams);
+
+ /**
+ * \brief Params for \ref CuptiUtilGetBufferInfo
+ */
+ typedef struct {
+ /**
+ * Size of the data structure i.e. CUptiUtil_GetBufferInfoParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * File handle.
+ */
+ std::ifstream *fileHandler;
+ /**
+ * Buffer Info.
+ */
+ BufferInfo bufferInfoData;
+ } CUptiUtil_GetBufferInfoParams;
+ #define CUptiUtil_GetBufferInfoParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetBufferInfoParams, bufferInfoData)
+
+ /**
+ * \brief Get buffer info data of file.
+ *
+ * This API must be called every time before calling the CuptiUtilGetPcSampData API.
+ * The \ref BufferInfo structure gives info about the recordCount and stall reason count
+ * of every record in the buffer. This helps to allocate an exactly sized buffer to retrieve the data into.
+ *
+ * \retval CUPTI_UTIL_SUCCESS
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
+ */
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetBufferInfo(CUptiUtil_GetBufferInfoParams *pParams);
+
+ /**
+ * \brief Params for \ref CuptiUtilGetPcSampData
+ */
+ typedef struct {
+ /**
+ * Size of the data structure i.e. CUptiUtil_GetPcSampDataParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * File handle.
+ */
+ std::ifstream *fileHandler;
+ /**
+ * Type of buffer to store in file
+ */
+ PcSamplingBufferType bufferType;
+ /**
+ * Pointer to collected buffer info using \ref CuptiUtilGetBufferInfo
+ */
+ BufferInfo *pBufferInfoData;
+ /**
+ * Pointer to allocated memory to store retrieved data from file.
+ */
+ void *pSamplingData;
+ /**
+ * Number of configuration attributes
+ */
+ size_t numAttributes;
+ /**
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
+ */
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
+ /**
+ * Refer \ref PcSamplingStallReasons.
+ * For stallReasons field of \ref PcSamplingStallReasons it is expected to
+ * allocate memory for each string element of array.
+ */
+ PcSamplingStallReasons *pPcSamplingStallReasons;
+ } CUptiUtil_GetPcSampDataParams;
+ #define CUptiUtil_GetPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetPcSampDataParams, pPcSamplingStallReasons)
+
+ /**
+ * \brief Retrieve PC sampling data from file into allocated buffer.
+ *
+ * This API must be called after the CuptiUtilGetBufferInfo API.
+ * It will retrieve data from the file into the allocated buffer.
+ *
+ * \retval CUPTI_UTIL_SUCCESS
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
+ * or if either of pSampData, pParams is NULL. If pPcSamplingStallReasons is not NULL then
+ * error out if either of stallReasonIndex, stallReasons or stallReasons array element pointer is NULL,
+ * or filename is empty.
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
+ */
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetPcSampData(CUptiUtil_GetPcSampDataParams *pParams);
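Read-back follows the same order the file was written in: header once, then BufferInfo and data for each buffer. A sketch, with the allocation of the CUpti_PCSamplingData destination buffer elided since its layout is defined in cupti_pcsampling.h:

    using namespace CUPTI::PcSamplingUtil;

    // Sketch: walk every buffer stored in the file.
    std::ifstream file("pcsampling.dat", std::ios::binary);

    CUptiUtil_GetHeaderDataParams headerParams = {};
    headerParams.size = CUptiUtil_GetHeaderDataParamsSize;
    headerParams.fileHandler = &file;
    CuptiUtilGetHeaderData(&headerParams);

    for (uint32_t i = 0; i < headerParams.headerInfo.totalBuffers; ++i)
    {
        CUptiUtil_GetBufferInfoParams infoParams = {};
        infoParams.size = CUptiUtil_GetBufferInfoParamsSize;
        infoParams.fileHandler = &file;
        CuptiUtilGetBufferInfo(&infoParams);

        // ... allocate a CUpti_PCSamplingData buffer (pcSampData) sized from
        //     infoParams.bufferInfoData.recordCount / numSelectedStallReasons ...

        CUptiUtil_GetPcSampDataParams dataParams = {};
        dataParams.size = CUptiUtil_GetPcSampDataParamsSize;
        dataParams.fileHandler = &file;
        dataParams.bufferType = PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA;
        dataParams.pBufferInfoData = &infoParams.bufferInfoData;
        dataParams.pSamplingData = pcSampData;     // the buffer allocated above
        CuptiUtilGetPcSampData(&dataParams);
    }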
+
+ /**
+ * \brief Params for \ref CuptiUtilMergePcSampData
+ */
+ typedef struct
+ {
+ /**
+ * Size of the data structure i.e. CUptiUtil_MergePcSampDataParamsSize
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
+ * available in the structure. Used to preserve backward compatibility.
+ */
+ size_t size;
+ /**
+ * Number of buffers to merge.
+ */
+ size_t numberOfBuffers;
+ /**
+ * Pointer to array of buffers to merge
+ */
+ CUpti_PCSamplingData *PcSampDataBuffer;
+ /**
+ * Pointer to array of merged buffers as per the range id.
+ */
+ CUpti_PCSamplingData **MergedPcSampDataBuffers;
+ /**
+ * Number of merged buffers.
+ */
+ size_t *numMergedBuffer;
+ } CUptiUtil_MergePcSampDataParams;
+ #define CUptiUtil_MergePcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_MergePcSampDataParams, numMergedBuffer)
+
+ /**
+ * \brief Merge PC sampling data range id wise.
+ *
+ * This API merges PC sampling data range id wise.
+ * It allocates memory for the merged data, fills it,
+ * and provides the buffer pointer in the MergedPcSampDataBuffers field.
+ * The user is expected to free the merged data buffers after use.
+ *
+ * \retval CUPTI_UTIL_SUCCESS
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if param struct size is invalid
+ * or count of buffers to merge is invalid i.e. less than 1
+ * or either of PcSampDataBuffer, MergedPcSampDataBuffers, numMergedBuffer is NULL
+ * \retval CUPTI_UTIL_ERROR_OUT_OF_MEMORY Unable to allocate memory for merged buffer.
+ */
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilMergePcSampData(CUptiUtil_MergePcSampDataParams *pParams);
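A sketch of merging several collected buffers by range id. Reading the merged-buffer array back out of MergedPcSampDataBuffers after the call is an inference from the description above (the utility allocates the merged data); collectedBuffers and collectedCount are caller-supplied placeholders.

    using namespace CUPTI::PcSamplingUtil;

    // Sketch: merge collectedCount buffers; the utility allocates the merged output.
    size_t numMerged = 0;
    CUptiUtil_MergePcSampDataParams mergeParams = {};
    mergeParams.size = CUptiUtil_MergePcSampDataParamsSize;
    mergeParams.numberOfBuffers = collectedCount;          // placeholder: number of collected buffers
    mergeParams.PcSampDataBuffer = collectedBuffers;       // placeholder: array of CUpti_PCSamplingData
    mergeParams.numMergedBuffer = &numMerged;
    CuptiUtilMergePcSampData(&mergeParams);
    // Inference from the description: mergeParams.MergedPcSampDataBuffers[0 .. numMerged-1]
    // now reference the merged buffers, which the caller frees after use.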
+
+ /** @} */ /* END CUPTI_PCSAMPLING_UTILITY */
+
+ } }
+
+ #if defined(__GNUC__)
+ #pragma GCC visibility pop
+ #endif
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif