applied-ai-018 commited on
Commit
5477c76
·
verified ·
1 Parent(s): ae5c8c8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz +3 -0
  3. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl +3 -0
  4. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.bz2 +3 -0
  5. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz +3 -0
  6. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.xz +3 -0
  7. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl +3 -0
  8. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2 +3 -0
  9. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz +3 -0
  10. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl +3 -0
  11. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2 +3 -0
  12. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz +3 -0
  13. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz +3 -0
  14. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np17.gz +3 -0
  15. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz +3 -0
  16. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl +3 -0
  17. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy +3 -0
  18. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy +3 -0
  19. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy +3 -0
  20. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl +3 -0
  21. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy +3 -0
  22. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy +3 -0
  23. llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_04.npy +3 -0
  24. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/__init__.py +0 -0
  25. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py +0 -0
  27. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h +891 -0
  29. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h +1815 -0
  30. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h +693 -0
  31. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h +0 -0
  32. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h +478 -0
  33. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h +824 -0
  34. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 +0 -0
  37. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h +98 -0
  38. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h +100 -0
  39. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h +1083 -0
  40. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so +3 -0
  42. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 +3 -0
  43. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so +3 -0
  44. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/nvrtc.h +845 -0
  45. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/nvidia/curand/__pycache__/__init__.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_globals.h +93 -0
  48. llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h +697 -0
  49. llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h +516 -0
  50. llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h +386 -0
.gitattributes CHANGED
@@ -91,3 +91,7 @@ llmeval-env/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu
91
  llmeval-env/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
92
  llmeval-env/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
93
  llmeval-env/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
91
  llmeval-env/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
92
  llmeval-env/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
93
  llmeval-env/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
94
+ llmeval-env/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
95
+ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
96
+ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
97
+ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1f4e8cccfca94f25ae744d1f050b0734f663263ba38ed0642181404b348b17b
3
+ size 757
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89c4508e3dfbe01f801e4e739f1aded13f685941e89281c8050f0ca8aa3c97e5
3
+ size 986
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a18415232322531c918164ae04148ebc258acd3a00fa4529728416755e14a15e
3
+ size 997
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efb146d450c6d061d06affb56f17384e7f64cbab9b516fcc6c4d3f8869b3e707
3
+ size 712
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04d7e68907e978b56975f9309492b8849e42a60974beb795c9e93273977f3cd3
3
+ size 752
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97b9ef2e896104321d3c5ce73b3de504788c38f04f08c8b56d7a29d6d1520a96
3
+ size 1068
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6a1a9b884be654e2e3fc9a48251ecf0c6920e255c3f2ee5dd71d8252a694606
3
+ size 1005
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02cf30d8b196c303662b2dd035d2a58caeb762ae3a82345ffd1274961e7f5aa0
3
+ size 752
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e6b0e171782d5fd5a61d1844dc946eb27c5f6b2e8075d436b23808433142ebc
3
+ size 1068
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc8db259be742ca2ff36067277f5e4a03e6d78883ddee238da65a7c7d79ef804
3
+ size 991
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd787f35b3197418d8c7bca77c9dc5ca47b6f22cd24524b3ccd074cf90f893d6
3
+ size 752
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34bb43aefa365c81f42af51402f84ea8c7a85c48c65b422e4e4fe8b2ee57883c
3
+ size 658
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np17.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34bb43aefa365c81f42af51402f84ea8c7a85c48c65b422e4e4fe8b2ee57883c
3
+ size 658
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f33bd8a21a41b729b05dac5deeb0e868f218a092b0e3fe5988094cf167217f6
3
+ size 673
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f29d7f1d2ceca07f10df172c0e826ef08163a14b12c6ef3fa80ec53a5fcdc3c
3
+ size 670
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
3
+ size 120
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0c45ae2a289841cbeba2443b7ebaa3b31c0a9e9dcc73294aca5729da0092405
3
+ size 236
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
3
+ size 120
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a538100e6ae94b16f2ab0f7d92d4d7e7a622be2dfcc0f6b0b73b623bc513ae2
3
+ size 691
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
3
+ size 120
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
3
+ size 120
llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_04.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05
3
+ size 104
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h ADDED
@@ -0,0 +1,891 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the CUBLAS library, defining the API
52
+ *
53
+ * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines)
54
+ * on top of the CUDA runtime.
55
+ */
56
+
57
+ #if !defined(CUBLAS_H_)
58
+ #define CUBLAS_H_
59
+
60
+ #if defined(CUBLAS_V2_H_)
61
+ #error "It is an error to include both cublas.h and cublas_v2.h"
62
+ #endif
63
+
64
+ #include <cuda_runtime.h>
65
+
66
+ #ifndef CUBLASWINAPI
67
+ #ifdef _WIN32
68
+ #define CUBLASWINAPI __stdcall
69
+ #else
70
+ #define CUBLASWINAPI
71
+ #endif
72
+ #endif
73
+
74
+ #undef CUBLASAPI
75
+ #ifdef __CUDACC__
76
+ #define CUBLASAPI __host__
77
+ #else
78
+ #define CUBLASAPI
79
+ #endif
80
+
81
+ #include "cublas_api.h"
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif
86
+
87
+ /* CUBLAS data types */
88
+ #define cublasStatus cublasStatus_t
89
+
90
+ cublasStatus CUBLASWINAPI cublasInit(void);
91
+ cublasStatus CUBLASWINAPI cublasShutdown(void);
92
+ cublasStatus CUBLASWINAPI cublasGetError(void);
93
+
94
+ cublasStatus CUBLASWINAPI cublasGetVersion(int* version);
95
+ cublasStatus CUBLASWINAPI cublasAlloc(int n, int elemSize, void** devicePtr);
96
+
97
+ cublasStatus CUBLASWINAPI cublasFree(void* devicePtr);
98
+
99
+ cublasStatus CUBLASWINAPI cublasSetKernelStream(cudaStream_t stream);
100
+
101
+ /* ---------------- CUBLAS BLAS1 functions ---------------- */
102
+ /* NRM2 */
103
+ float CUBLASWINAPI cublasSnrm2(int n, const float* x, int incx);
104
+ double CUBLASWINAPI cublasDnrm2(int n, const double* x, int incx);
105
+ float CUBLASWINAPI cublasScnrm2(int n, const cuComplex* x, int incx);
106
+ double CUBLASWINAPI cublasDznrm2(int n, const cuDoubleComplex* x, int incx);
107
+ /*------------------------------------------------------------------------*/
108
+ /* DOT */
109
+ float CUBLASWINAPI cublasSdot(int n, const float* x, int incx, const float* y, int incy);
110
+ double CUBLASWINAPI cublasDdot(int n, const double* x, int incx, const double* y, int incy);
111
+ cuComplex CUBLASWINAPI cublasCdotu(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
112
+ cuComplex CUBLASWINAPI cublasCdotc(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
113
+ cuDoubleComplex CUBLASWINAPI cublasZdotu(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
114
+ cuDoubleComplex CUBLASWINAPI cublasZdotc(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
115
+ /*------------------------------------------------------------------------*/
116
+ /* SCAL */
117
+ void CUBLASWINAPI cublasSscal(int n, float alpha, float* x, int incx);
118
+ void CUBLASWINAPI cublasDscal(int n, double alpha, double* x, int incx);
119
+ void CUBLASWINAPI cublasCscal(int n, cuComplex alpha, cuComplex* x, int incx);
120
+ void CUBLASWINAPI cublasZscal(int n, cuDoubleComplex alpha, cuDoubleComplex* x, int incx);
121
+
122
+ void CUBLASWINAPI cublasCsscal(int n, float alpha, cuComplex* x, int incx);
123
+ void CUBLASWINAPI cublasZdscal(int n, double alpha, cuDoubleComplex* x, int incx);
124
+ /*------------------------------------------------------------------------*/
125
+ /* AXPY */
126
+ void CUBLASWINAPI cublasSaxpy(int n, float alpha, const float* x, int incx, float* y, int incy);
127
+ void CUBLASWINAPI cublasDaxpy(int n, double alpha, const double* x, int incx, double* y, int incy);
128
+ void CUBLASWINAPI cublasCaxpy(int n, cuComplex alpha, const cuComplex* x, int incx, cuComplex* y, int incy);
129
+ void CUBLASWINAPI
130
+ cublasZaxpy(int n, cuDoubleComplex alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
131
+ /*------------------------------------------------------------------------*/
132
+ /* COPY */
133
+ void CUBLASWINAPI cublasScopy(int n, const float* x, int incx, float* y, int incy);
134
+ void CUBLASWINAPI cublasDcopy(int n, const double* x, int incx, double* y, int incy);
135
+ void CUBLASWINAPI cublasCcopy(int n, const cuComplex* x, int incx, cuComplex* y, int incy);
136
+ void CUBLASWINAPI cublasZcopy(int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
137
+ /*------------------------------------------------------------------------*/
138
+ /* SWAP */
139
+ void CUBLASWINAPI cublasSswap(int n, float* x, int incx, float* y, int incy);
140
+ void CUBLASWINAPI cublasDswap(int n, double* x, int incx, double* y, int incy);
141
+ void CUBLASWINAPI cublasCswap(int n, cuComplex* x, int incx, cuComplex* y, int incy);
142
+ void CUBLASWINAPI cublasZswap(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
143
+ /*------------------------------------------------------------------------*/
144
+ /* AMAX */
145
+ int CUBLASWINAPI cublasIsamax(int n, const float* x, int incx);
146
+ int CUBLASWINAPI cublasIdamax(int n, const double* x, int incx);
147
+ int CUBLASWINAPI cublasIcamax(int n, const cuComplex* x, int incx);
148
+ int CUBLASWINAPI cublasIzamax(int n, const cuDoubleComplex* x, int incx);
149
+ /*------------------------------------------------------------------------*/
150
+ /* AMIN */
151
+ int CUBLASWINAPI cublasIsamin(int n, const float* x, int incx);
152
+ int CUBLASWINAPI cublasIdamin(int n, const double* x, int incx);
153
+
154
+ int CUBLASWINAPI cublasIcamin(int n, const cuComplex* x, int incx);
155
+ int CUBLASWINAPI cublasIzamin(int n, const cuDoubleComplex* x, int incx);
156
+ /*------------------------------------------------------------------------*/
157
+ /* ASUM */
158
+ float CUBLASWINAPI cublasSasum(int n, const float* x, int incx);
159
+ double CUBLASWINAPI cublasDasum(int n, const double* x, int incx);
160
+ float CUBLASWINAPI cublasScasum(int n, const cuComplex* x, int incx);
161
+ double CUBLASWINAPI cublasDzasum(int n, const cuDoubleComplex* x, int incx);
162
+ /*------------------------------------------------------------------------*/
163
+ /* ROT */
164
+ void CUBLASWINAPI cublasSrot(int n, float* x, int incx, float* y, int incy, float sc, float ss);
165
+ void CUBLASWINAPI cublasDrot(int n, double* x, int incx, double* y, int incy, double sc, double ss);
166
+ void CUBLASWINAPI cublasCrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, cuComplex s);
167
+ void CUBLASWINAPI
168
+ cublasZrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double sc, cuDoubleComplex cs);
169
+ void CUBLASWINAPI cublasCsrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, float s);
170
+ void CUBLASWINAPI cublasZdrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double c, double s);
171
+ /*------------------------------------------------------------------------*/
172
+ /* ROTG */
173
+ void CUBLASWINAPI cublasSrotg(float* sa, float* sb, float* sc, float* ss);
174
+ void CUBLASWINAPI cublasDrotg(double* sa, double* sb, double* sc, double* ss);
175
+ void CUBLASWINAPI cublasCrotg(cuComplex* ca, cuComplex cb, float* sc, cuComplex* cs);
176
+ void CUBLASWINAPI cublasZrotg(cuDoubleComplex* ca, cuDoubleComplex cb, double* sc, cuDoubleComplex* cs);
177
+ /*------------------------------------------------------------------------*/
178
+ /* ROTM */
179
+ void CUBLASWINAPI cublasSrotm(int n, float* x, int incx, float* y, int incy, const float* sparam);
180
+ void CUBLASWINAPI cublasDrotm(int n, double* x, int incx, double* y, int incy, const double* sparam);
181
+ /*------------------------------------------------------------------------*/
182
+ /* ROTMG */
183
+ void CUBLASWINAPI cublasSrotmg(float* sd1, float* sd2, float* sx1, const float* sy1, float* sparam);
184
+ void CUBLASWINAPI cublasDrotmg(double* sd1, double* sd2, double* sx1, const double* sy1, double* sparam);
185
+
186
+ /* --------------- CUBLAS BLAS2 functions ---------------- */
187
+ /* GEMV */
188
+ void CUBLASWINAPI cublasSgemv(char trans,
189
+ int m,
190
+ int n,
191
+ float alpha,
192
+ const float* A,
193
+ int lda,
194
+ const float* x,
195
+ int incx,
196
+ float beta,
197
+ float* y,
198
+ int incy);
199
+ void CUBLASWINAPI cublasDgemv(char trans,
200
+ int m,
201
+ int n,
202
+ double alpha,
203
+ const double* A,
204
+ int lda,
205
+ const double* x,
206
+ int incx,
207
+ double beta,
208
+ double* y,
209
+ int incy);
210
+ void CUBLASWINAPI cublasCgemv(char trans,
211
+ int m,
212
+ int n,
213
+ cuComplex alpha,
214
+ const cuComplex* A,
215
+ int lda,
216
+ const cuComplex* x,
217
+ int incx,
218
+ cuComplex beta,
219
+ cuComplex* y,
220
+ int incy);
221
+ void CUBLASWINAPI cublasZgemv(char trans,
222
+ int m,
223
+ int n,
224
+ cuDoubleComplex alpha,
225
+ const cuDoubleComplex* A,
226
+ int lda,
227
+ const cuDoubleComplex* x,
228
+ int incx,
229
+ cuDoubleComplex beta,
230
+ cuDoubleComplex* y,
231
+ int incy);
232
+ /*------------------------------------------------------------------------*/
233
+ /* GBMV */
234
+ void CUBLASWINAPI cublasSgbmv(char trans,
235
+ int m,
236
+ int n,
237
+ int kl,
238
+ int ku,
239
+ float alpha,
240
+ const float* A,
241
+ int lda,
242
+ const float* x,
243
+ int incx,
244
+ float beta,
245
+ float* y,
246
+ int incy);
247
+ void CUBLASWINAPI cublasDgbmv(char trans,
248
+ int m,
249
+ int n,
250
+ int kl,
251
+ int ku,
252
+ double alpha,
253
+ const double* A,
254
+ int lda,
255
+ const double* x,
256
+ int incx,
257
+ double beta,
258
+ double* y,
259
+ int incy);
260
+ void CUBLASWINAPI cublasCgbmv(char trans,
261
+ int m,
262
+ int n,
263
+ int kl,
264
+ int ku,
265
+ cuComplex alpha,
266
+ const cuComplex* A,
267
+ int lda,
268
+ const cuComplex* x,
269
+ int incx,
270
+ cuComplex beta,
271
+ cuComplex* y,
272
+ int incy);
273
+ void CUBLASWINAPI cublasZgbmv(char trans,
274
+ int m,
275
+ int n,
276
+ int kl,
277
+ int ku,
278
+ cuDoubleComplex alpha,
279
+ const cuDoubleComplex* A,
280
+ int lda,
281
+ const cuDoubleComplex* x,
282
+ int incx,
283
+ cuDoubleComplex beta,
284
+ cuDoubleComplex* y,
285
+ int incy);
286
+ /*------------------------------------------------------------------------*/
287
+ /* TRMV */
288
+ void CUBLASWINAPI cublasStrmv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
289
+ void CUBLASWINAPI cublasDtrmv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
290
+ void CUBLASWINAPI
291
+ cublasCtrmv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
292
+ void CUBLASWINAPI
293
+ cublasZtrmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
294
+ /*------------------------------------------------------------------------*/
295
+ /* TBMV */
296
+ void CUBLASWINAPI
297
+ cublasStbmv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
298
+ void CUBLASWINAPI
299
+ cublasDtbmv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
300
+ void CUBLASWINAPI
301
+ cublasCtbmv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
302
+ void CUBLASWINAPI cublasZtbmv(
303
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
304
+ /*------------------------------------------------------------------------*/
305
+ /* TPMV */
306
+ void CUBLASWINAPI cublasStpmv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
307
+
308
+ void CUBLASWINAPI cublasDtpmv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
309
+
310
+ void CUBLASWINAPI cublasCtpmv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
311
+
312
+ void CUBLASWINAPI
313
+ cublasZtpmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
314
+ /*------------------------------------------------------------------------*/
315
+ /* TRSV */
316
+ void CUBLASWINAPI cublasStrsv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
317
+
318
+ void CUBLASWINAPI cublasDtrsv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
319
+
320
+ void CUBLASWINAPI
321
+ cublasCtrsv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
322
+
323
+ void CUBLASWINAPI
324
+ cublasZtrsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
325
+ /*------------------------------------------------------------------------*/
326
+ /* TPSV */
327
+ void CUBLASWINAPI cublasStpsv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
328
+
329
+ void CUBLASWINAPI cublasDtpsv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
330
+
331
+ void CUBLASWINAPI cublasCtpsv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
332
+
333
+ void CUBLASWINAPI
334
+ cublasZtpsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
335
+ /*------------------------------------------------------------------------*/
336
+ /* TBSV */
337
+ void CUBLASWINAPI
338
+ cublasStbsv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
339
+
340
+ void CUBLASWINAPI
341
+ cublasDtbsv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
342
+ void CUBLASWINAPI
343
+ cublasCtbsv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
344
+
345
+ void CUBLASWINAPI cublasZtbsv(
346
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
347
+ /*------------------------------------------------------------------------*/
348
+ /* SYMV/HEMV */
349
+ void CUBLASWINAPI cublasSsymv(
350
+ char uplo, int n, float alpha, const float* A, int lda, const float* x, int incx, float beta, float* y, int incy);
351
+ void CUBLASWINAPI cublasDsymv(char uplo,
352
+ int n,
353
+ double alpha,
354
+ const double* A,
355
+ int lda,
356
+ const double* x,
357
+ int incx,
358
+ double beta,
359
+ double* y,
360
+ int incy);
361
+ void CUBLASWINAPI cublasChemv(char uplo,
362
+ int n,
363
+ cuComplex alpha,
364
+ const cuComplex* A,
365
+ int lda,
366
+ const cuComplex* x,
367
+ int incx,
368
+ cuComplex beta,
369
+ cuComplex* y,
370
+ int incy);
371
+ void CUBLASWINAPI cublasZhemv(char uplo,
372
+ int n,
373
+ cuDoubleComplex alpha,
374
+ const cuDoubleComplex* A,
375
+ int lda,
376
+ const cuDoubleComplex* x,
377
+ int incx,
378
+ cuDoubleComplex beta,
379
+ cuDoubleComplex* y,
380
+ int incy);
381
+ /*------------------------------------------------------------------------*/
382
+ /* SBMV/HBMV */
383
+ void CUBLASWINAPI cublasSsbmv(char uplo,
384
+ int n,
385
+ int k,
386
+ float alpha,
387
+ const float* A,
388
+ int lda,
389
+ const float* x,
390
+ int incx,
391
+ float beta,
392
+ float* y,
393
+ int incy);
394
+ void CUBLASWINAPI cublasDsbmv(char uplo,
395
+ int n,
396
+ int k,
397
+ double alpha,
398
+ const double* A,
399
+ int lda,
400
+ const double* x,
401
+ int incx,
402
+ double beta,
403
+ double* y,
404
+ int incy);
405
+ void CUBLASWINAPI cublasChbmv(char uplo,
406
+ int n,
407
+ int k,
408
+ cuComplex alpha,
409
+ const cuComplex* A,
410
+ int lda,
411
+ const cuComplex* x,
412
+ int incx,
413
+ cuComplex beta,
414
+ cuComplex* y,
415
+ int incy);
416
+ void CUBLASWINAPI cublasZhbmv(char uplo,
417
+ int n,
418
+ int k,
419
+ cuDoubleComplex alpha,
420
+ const cuDoubleComplex* A,
421
+ int lda,
422
+ const cuDoubleComplex* x,
423
+ int incx,
424
+ cuDoubleComplex beta,
425
+ cuDoubleComplex* y,
426
+ int incy);
427
+ /*------------------------------------------------------------------------*/
428
+ /* SPMV/HPMV */
429
+ void CUBLASWINAPI
430
+ cublasSspmv(char uplo, int n, float alpha, const float* AP, const float* x, int incx, float beta, float* y, int incy);
431
+ void CUBLASWINAPI cublasDspmv(
432
+ char uplo, int n, double alpha, const double* AP, const double* x, int incx, double beta, double* y, int incy);
433
+ void CUBLASWINAPI cublasChpmv(char uplo,
434
+ int n,
435
+ cuComplex alpha,
436
+ const cuComplex* AP,
437
+ const cuComplex* x,
438
+ int incx,
439
+ cuComplex beta,
440
+ cuComplex* y,
441
+ int incy);
442
+ void CUBLASWINAPI cublasZhpmv(char uplo,
443
+ int n,
444
+ cuDoubleComplex alpha,
445
+ const cuDoubleComplex* AP,
446
+ const cuDoubleComplex* x,
447
+ int incx,
448
+ cuDoubleComplex beta,
449
+ cuDoubleComplex* y,
450
+ int incy);
451
+
452
+ /*------------------------------------------------------------------------*/
453
+ /* GER */
454
+ void CUBLASWINAPI
455
+ cublasSger(int m, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
456
+ void CUBLASWINAPI
457
+ cublasDger(int m, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
458
+
459
+ void CUBLASWINAPI cublasCgeru(
460
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
461
+ void CUBLASWINAPI cublasCgerc(
462
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
463
+ void CUBLASWINAPI cublasZgeru(int m,
464
+ int n,
465
+ cuDoubleComplex alpha,
466
+ const cuDoubleComplex* x,
467
+ int incx,
468
+ const cuDoubleComplex* y,
469
+ int incy,
470
+ cuDoubleComplex* A,
471
+ int lda);
472
+ void CUBLASWINAPI cublasZgerc(int m,
473
+ int n,
474
+ cuDoubleComplex alpha,
475
+ const cuDoubleComplex* x,
476
+ int incx,
477
+ const cuDoubleComplex* y,
478
+ int incy,
479
+ cuDoubleComplex* A,
480
+ int lda);
481
+ /*------------------------------------------------------------------------*/
482
+ /* SYR/HER */
483
+ void CUBLASWINAPI cublasSsyr(char uplo, int n, float alpha, const float* x, int incx, float* A, int lda);
484
+ void CUBLASWINAPI cublasDsyr(char uplo, int n, double alpha, const double* x, int incx, double* A, int lda);
485
+
486
+ void CUBLASWINAPI cublasCher(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* A, int lda);
487
+ void CUBLASWINAPI
488
+ cublasZher(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* A, int lda);
489
+
490
+ /*------------------------------------------------------------------------*/
491
+ /* SPR/HPR */
492
+ void CUBLASWINAPI cublasSspr(char uplo, int n, float alpha, const float* x, int incx, float* AP);
493
+ void CUBLASWINAPI cublasDspr(char uplo, int n, double alpha, const double* x, int incx, double* AP);
494
+ void CUBLASWINAPI cublasChpr(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* AP);
495
+ void CUBLASWINAPI cublasZhpr(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* AP);
496
+ /*------------------------------------------------------------------------*/
497
+ /* SYR2/HER2 */
498
+ void CUBLASWINAPI
499
+ cublasSsyr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
500
+ void CUBLASWINAPI
501
+ cublasDsyr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
502
+ void CUBLASWINAPI cublasCher2(char uplo,
503
+ int n,
504
+ cuComplex alpha,
505
+ const cuComplex* x,
506
+ int incx,
507
+ const cuComplex* y,
508
+ int incy,
509
+ cuComplex* A,
510
+ int lda);
511
+ void CUBLASWINAPI cublasZher2(char uplo,
512
+ int n,
513
+ cuDoubleComplex alpha,
514
+ const cuDoubleComplex* x,
515
+ int incx,
516
+ const cuDoubleComplex* y,
517
+ int incy,
518
+ cuDoubleComplex* A,
519
+ int lda);
520
+
521
+ /*------------------------------------------------------------------------*/
522
+ /* SPR2/HPR2 */
523
+ void CUBLASWINAPI
524
+ cublasSspr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* AP);
525
+ void CUBLASWINAPI
526
+ cublasDspr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* AP);
527
+ void CUBLASWINAPI cublasChpr2(
528
+ char uplo, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* AP);
529
+ void CUBLASWINAPI cublasZhpr2(char uplo,
530
+ int n,
531
+ cuDoubleComplex alpha,
532
+ const cuDoubleComplex* x,
533
+ int incx,
534
+ const cuDoubleComplex* y,
535
+ int incy,
536
+ cuDoubleComplex* AP);
537
+ /* ------------------------BLAS3 Functions ------------------------------- */
538
+ /* GEMM */
539
+ void CUBLASWINAPI cublasSgemm(char transa,
540
+ char transb,
541
+ int m,
542
+ int n,
543
+ int k,
544
+ float alpha,
545
+ const float* A,
546
+ int lda,
547
+ const float* B,
548
+ int ldb,
549
+ float beta,
550
+ float* C,
551
+ int ldc);
552
+ void CUBLASWINAPI cublasDgemm(char transa,
553
+ char transb,
554
+ int m,
555
+ int n,
556
+ int k,
557
+ double alpha,
558
+ const double* A,
559
+ int lda,
560
+ const double* B,
561
+ int ldb,
562
+ double beta,
563
+ double* C,
564
+ int ldc);
565
+ void CUBLASWINAPI cublasCgemm(char transa,
566
+ char transb,
567
+ int m,
568
+ int n,
569
+ int k,
570
+ cuComplex alpha,
571
+ const cuComplex* A,
572
+ int lda,
573
+ const cuComplex* B,
574
+ int ldb,
575
+ cuComplex beta,
576
+ cuComplex* C,
577
+ int ldc);
578
+ void CUBLASWINAPI cublasZgemm(char transa,
579
+ char transb,
580
+ int m,
581
+ int n,
582
+ int k,
583
+ cuDoubleComplex alpha,
584
+ const cuDoubleComplex* A,
585
+ int lda,
586
+ const cuDoubleComplex* B,
587
+ int ldb,
588
+ cuDoubleComplex beta,
589
+ cuDoubleComplex* C,
590
+ int ldc);
591
+ /* -------------------------------------------------------*/
592
+ /* SYRK */
593
+ void CUBLASWINAPI
594
+ cublasSsyrk(char uplo, char trans, int n, int k, float alpha, const float* A, int lda, float beta, float* C, int ldc);
595
+ void CUBLASWINAPI cublasDsyrk(
596
+ char uplo, char trans, int n, int k, double alpha, const double* A, int lda, double beta, double* C, int ldc);
597
+
598
+ void CUBLASWINAPI cublasCsyrk(char uplo,
599
+ char trans,
600
+ int n,
601
+ int k,
602
+ cuComplex alpha,
603
+ const cuComplex* A,
604
+ int lda,
605
+ cuComplex beta,
606
+ cuComplex* C,
607
+ int ldc);
608
+ void CUBLASWINAPI cublasZsyrk(char uplo,
609
+ char trans,
610
+ int n,
611
+ int k,
612
+ cuDoubleComplex alpha,
613
+ const cuDoubleComplex* A,
614
+ int lda,
615
+ cuDoubleComplex beta,
616
+ cuDoubleComplex* C,
617
+ int ldc);
618
+ /* ------------------------------------------------------- */
619
+ /* HERK */
620
+ void CUBLASWINAPI cublasCherk(
621
+ char uplo, char trans, int n, int k, float alpha, const cuComplex* A, int lda, float beta, cuComplex* C, int ldc);
622
+ void CUBLASWINAPI cublasZherk(char uplo,
623
+ char trans,
624
+ int n,
625
+ int k,
626
+ double alpha,
627
+ const cuDoubleComplex* A,
628
+ int lda,
629
+ double beta,
630
+ cuDoubleComplex* C,
631
+ int ldc);
632
+ /* ------------------------------------------------------- */
633
+ /* SYR2K */
634
+ void CUBLASWINAPI cublasSsyr2k(char uplo,
635
+ char trans,
636
+ int n,
637
+ int k,
638
+ float alpha,
639
+ const float* A,
640
+ int lda,
641
+ const float* B,
642
+ int ldb,
643
+ float beta,
644
+ float* C,
645
+ int ldc);
646
+
647
+ void CUBLASWINAPI cublasDsyr2k(char uplo,
648
+ char trans,
649
+ int n,
650
+ int k,
651
+ double alpha,
652
+ const double* A,
653
+ int lda,
654
+ const double* B,
655
+ int ldb,
656
+ double beta,
657
+ double* C,
658
+ int ldc);
659
+ void CUBLASWINAPI cublasCsyr2k(char uplo,
660
+ char trans,
661
+ int n,
662
+ int k,
663
+ cuComplex alpha,
664
+ const cuComplex* A,
665
+ int lda,
666
+ const cuComplex* B,
667
+ int ldb,
668
+ cuComplex beta,
669
+ cuComplex* C,
670
+ int ldc);
671
+
672
+ void CUBLASWINAPI cublasZsyr2k(char uplo,
673
+ char trans,
674
+ int n,
675
+ int k,
676
+ cuDoubleComplex alpha,
677
+ const cuDoubleComplex* A,
678
+ int lda,
679
+ const cuDoubleComplex* B,
680
+ int ldb,
681
+ cuDoubleComplex beta,
682
+ cuDoubleComplex* C,
683
+ int ldc);
684
+ /* ------------------------------------------------------- */
685
+ /* HER2K */
686
+ void CUBLASWINAPI cublasCher2k(char uplo,
687
+ char trans,
688
+ int n,
689
+ int k,
690
+ cuComplex alpha,
691
+ const cuComplex* A,
692
+ int lda,
693
+ const cuComplex* B,
694
+ int ldb,
695
+ float beta,
696
+ cuComplex* C,
697
+ int ldc);
698
+
699
+ void CUBLASWINAPI cublasZher2k(char uplo,
700
+ char trans,
701
+ int n,
702
+ int k,
703
+ cuDoubleComplex alpha,
704
+ const cuDoubleComplex* A,
705
+ int lda,
706
+ const cuDoubleComplex* B,
707
+ int ldb,
708
+ double beta,
709
+ cuDoubleComplex* C,
710
+ int ldc);
711
+
712
+ /*------------------------------------------------------------------------*/
713
+ /* SYMM*/
714
+ void CUBLASWINAPI cublasSsymm(char side,
715
+ char uplo,
716
+ int m,
717
+ int n,
718
+ float alpha,
719
+ const float* A,
720
+ int lda,
721
+ const float* B,
722
+ int ldb,
723
+ float beta,
724
+ float* C,
725
+ int ldc);
726
+ void CUBLASWINAPI cublasDsymm(char side,
727
+ char uplo,
728
+ int m,
729
+ int n,
730
+ double alpha,
731
+ const double* A,
732
+ int lda,
733
+ const double* B,
734
+ int ldb,
735
+ double beta,
736
+ double* C,
737
+ int ldc);
738
+
739
+ void CUBLASWINAPI cublasCsymm(char side,
740
+ char uplo,
741
+ int m,
742
+ int n,
743
+ cuComplex alpha,
744
+ const cuComplex* A,
745
+ int lda,
746
+ const cuComplex* B,
747
+ int ldb,
748
+ cuComplex beta,
749
+ cuComplex* C,
750
+ int ldc);
751
+
752
+ void CUBLASWINAPI cublasZsymm(char side,
753
+ char uplo,
754
+ int m,
755
+ int n,
756
+ cuDoubleComplex alpha,
757
+ const cuDoubleComplex* A,
758
+ int lda,
759
+ const cuDoubleComplex* B,
760
+ int ldb,
761
+ cuDoubleComplex beta,
762
+ cuDoubleComplex* C,
763
+ int ldc);
764
+ /*------------------------------------------------------------------------*/
765
+ /* HEMM*/
766
+ void CUBLASWINAPI cublasChemm(char side,
767
+ char uplo,
768
+ int m,
769
+ int n,
770
+ cuComplex alpha,
771
+ const cuComplex* A,
772
+ int lda,
773
+ const cuComplex* B,
774
+ int ldb,
775
+ cuComplex beta,
776
+ cuComplex* C,
777
+ int ldc);
778
+ void CUBLASWINAPI cublasZhemm(char side,
779
+ char uplo,
780
+ int m,
781
+ int n,
782
+ cuDoubleComplex alpha,
783
+ const cuDoubleComplex* A,
784
+ int lda,
785
+ const cuDoubleComplex* B,
786
+ int ldb,
787
+ cuDoubleComplex beta,
788
+ cuDoubleComplex* C,
789
+ int ldc);
790
+
791
+ /*------------------------------------------------------------------------*/
792
+ /* TRSM*/
793
+ void CUBLASWINAPI cublasStrsm(char side,
794
+ char uplo,
795
+ char transa,
796
+ char diag,
797
+ int m,
798
+ int n,
799
+ float alpha,
800
+ const float* A,
801
+ int lda,
802
+ float* B,
803
+ int ldb);
804
+
805
+ void CUBLASWINAPI cublasDtrsm(char side,
806
+ char uplo,
807
+ char transa,
808
+ char diag,
809
+ int m,
810
+ int n,
811
+ double alpha,
812
+ const double* A,
813
+ int lda,
814
+ double* B,
815
+ int ldb);
816
+
817
+ void CUBLASWINAPI cublasCtrsm(char side,
818
+ char uplo,
819
+ char transa,
820
+ char diag,
821
+ int m,
822
+ int n,
823
+ cuComplex alpha,
824
+ const cuComplex* A,
825
+ int lda,
826
+ cuComplex* B,
827
+ int ldb);
828
+
829
+ void CUBLASWINAPI cublasZtrsm(char side,
830
+ char uplo,
831
+ char transa,
832
+ char diag,
833
+ int m,
834
+ int n,
835
+ cuDoubleComplex alpha,
836
+ const cuDoubleComplex* A,
837
+ int lda,
838
+ cuDoubleComplex* B,
839
+ int ldb);
840
+ /*------------------------------------------------------------------------*/
841
+ /* TRMM*/
842
+ void CUBLASWINAPI cublasStrmm(char side,
843
+ char uplo,
844
+ char transa,
845
+ char diag,
846
+ int m,
847
+ int n,
848
+ float alpha,
849
+ const float* A,
850
+ int lda,
851
+ float* B,
852
+ int ldb);
853
+ void CUBLASWINAPI cublasDtrmm(char side,
854
+ char uplo,
855
+ char transa,
856
+ char diag,
857
+ int m,
858
+ int n,
859
+ double alpha,
860
+ const double* A,
861
+ int lda,
862
+ double* B,
863
+ int ldb);
864
+ void CUBLASWINAPI cublasCtrmm(char side,
865
+ char uplo,
866
+ char transa,
867
+ char diag,
868
+ int m,
869
+ int n,
870
+ cuComplex alpha,
871
+ const cuComplex* A,
872
+ int lda,
873
+ cuComplex* B,
874
+ int ldb);
875
+ void CUBLASWINAPI cublasZtrmm(char side,
876
+ char uplo,
877
+ char transa,
878
+ char diag,
879
+ int m,
880
+ int n,
881
+ cuDoubleComplex alpha,
882
+ const cuDoubleComplex* A,
883
+ int lda,
884
+ cuDoubleComplex* B,
885
+ int ldb);
886
+
887
+ #if defined(__cplusplus)
888
+ }
889
+ #endif /* __cplusplus */
890
+
891
+ #endif /* !defined(CUBLAS_H_) */
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h ADDED
@@ -0,0 +1,1815 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+ #pragma once
50
+
51
+ #ifndef CUBLASAPI
52
+ #ifdef __CUDACC__
53
+ #define CUBLASAPI __host__ __device__
54
+ #else
55
+ #define CUBLASAPI
56
+ #endif
57
+ #endif
58
+
59
+ #include <cublas_api.h>
60
+
61
+ #include <stdint.h>
62
+ #include <stddef.h>
63
+ #include <stdio.h>
64
+
65
+ #if defined(__cplusplus)
66
+ extern "C" {
67
+ #endif /* __cplusplus */
68
+
69
+ /** Opaque structure holding CUBLASLT context
70
+ */
71
+ typedef struct cublasLtContext* cublasLtHandle_t;
72
+
73
+ cublasStatus_t CUBLASWINAPI cublasLtCreate(cublasLtHandle_t* lightHandle);
74
+
75
+ cublasStatus_t CUBLASWINAPI cublasLtDestroy(cublasLtHandle_t lightHandle);
76
+
77
+ const char* CUBLASWINAPI cublasLtGetStatusName(cublasStatus_t status);
78
+
79
+ const char* CUBLASWINAPI cublasLtGetStatusString(cublasStatus_t status);
80
+
81
+ size_t CUBLASWINAPI cublasLtGetVersion(void);
82
+
83
+ size_t CUBLASWINAPI cublasLtGetCudartVersion(void);
84
+
85
+ cublasStatus_t CUBLASWINAPI cublasLtGetProperty(libraryPropertyType type, int* value);
86
+
87
+ cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheGetCapacity(size_t* capacity);
88
+ cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheSetCapacity(size_t capacity);
89
+
90
+ /** Restricts usage of CPU instructions (ISA) specified by the flags in the mask.
91
+ *
92
+ * Flags can be combined with bitwise OR(|) operator. Supported flags:
93
+ * - 0x1 -- x86-64 AVX512 ISA
94
+ *
95
+ * Default mask: 0 (any applicable ISA is allowed).
96
+ *
97
+ * The function returns the previous value of the mask.
98
+ * The function takes precedence over the environment variable CUBLASLT_DISABLE_CPU_INSTRUCTIONS_MASK.
99
+ */
100
+ unsigned CUBLASWINAPI cublasLtDisableCpuInstructionsSetMask(unsigned mask);
101
+
102
+ /** Semi-opaque descriptor for matrix memory layout
103
+ */
104
+ typedef struct {
105
+ uint64_t data[8];
106
+ } cublasLtMatrixLayoutOpaque_t;
107
+
108
+ /** Opaque descriptor for matrix memory layout
109
+ */
110
+ typedef cublasLtMatrixLayoutOpaque_t* cublasLtMatrixLayout_t;
111
+
112
+ /** Semi-opaque algorithm descriptor (to avoid complicated alloc/free schemes)
113
+ *
114
+ * This structure can be trivially serialized and later restored for use with the same version of cuBLAS library to save
115
+ * on selecting the right configuration again.
116
+ */
117
+ typedef struct {
118
+ uint64_t data[8];
119
+ } cublasLtMatmulAlgo_t;
120
+
121
+ /** Semi-opaque descriptor for cublasLtMatmul() operation details
122
+ */
123
+ typedef struct {
124
+ uint64_t data[23];
125
+ } cublasLtMatmulDescOpaque_t;
126
+
127
+ /** Opaque descriptor for cublasLtMatmul() operation details
128
+ */
129
+ typedef cublasLtMatmulDescOpaque_t* cublasLtMatmulDesc_t;
130
+
131
+ /** Semi-opaque descriptor for cublasLtMatrixTransform() operation details
132
+ */
133
+ typedef struct {
134
+ uint64_t data[8];
135
+ } cublasLtMatrixTransformDescOpaque_t;
136
+
137
+ /** Opaque descriptor for cublasLtMatrixTransform() operation details
138
+ */
139
+ typedef cublasLtMatrixTransformDescOpaque_t* cublasLtMatrixTransformDesc_t;
140
+
141
+ /** Semi-opaque descriptor for cublasLtMatmulPreference() operation details
142
+ */
143
+ typedef struct {
144
+ uint64_t data[8];
145
+ } cublasLtMatmulPreferenceOpaque_t;
146
+
147
+ /** Opaque descriptor for cublasLtMatmulAlgoGetHeuristic() configuration
148
+ */
149
+ typedef cublasLtMatmulPreferenceOpaque_t* cublasLtMatmulPreference_t;
150
+
151
+ /** Tile size (in C/D matrix Rows x Cols)
152
+ *
153
+ * General order of tile IDs is sorted by size first and by first dimension second.
154
+ */
155
+ typedef enum {
156
+ CUBLASLT_MATMUL_TILE_UNDEFINED = 0,
157
+ CUBLASLT_MATMUL_TILE_8x8 = 1,
158
+ CUBLASLT_MATMUL_TILE_8x16 = 2,
159
+ CUBLASLT_MATMUL_TILE_16x8 = 3,
160
+ CUBLASLT_MATMUL_TILE_8x32 = 4,
161
+ CUBLASLT_MATMUL_TILE_16x16 = 5,
162
+ CUBLASLT_MATMUL_TILE_32x8 = 6,
163
+ CUBLASLT_MATMUL_TILE_8x64 = 7,
164
+ CUBLASLT_MATMUL_TILE_16x32 = 8,
165
+ CUBLASLT_MATMUL_TILE_32x16 = 9,
166
+ CUBLASLT_MATMUL_TILE_64x8 = 10,
167
+ CUBLASLT_MATMUL_TILE_32x32 = 11,
168
+ CUBLASLT_MATMUL_TILE_32x64 = 12,
169
+ CUBLASLT_MATMUL_TILE_64x32 = 13,
170
+ CUBLASLT_MATMUL_TILE_32x128 = 14,
171
+ CUBLASLT_MATMUL_TILE_64x64 = 15,
172
+ CUBLASLT_MATMUL_TILE_128x32 = 16,
173
+ CUBLASLT_MATMUL_TILE_64x128 = 17,
174
+ CUBLASLT_MATMUL_TILE_128x64 = 18,
175
+ CUBLASLT_MATMUL_TILE_64x256 = 19,
176
+ CUBLASLT_MATMUL_TILE_128x128 = 20,
177
+ CUBLASLT_MATMUL_TILE_256x64 = 21,
178
+ CUBLASLT_MATMUL_TILE_64x512 = 22,
179
+ CUBLASLT_MATMUL_TILE_128x256 = 23,
180
+ CUBLASLT_MATMUL_TILE_256x128 = 24,
181
+ CUBLASLT_MATMUL_TILE_512x64 = 25,
182
+ CUBLASLT_MATMUL_TILE_64x96 = 26,
183
+ CUBLASLT_MATMUL_TILE_96x64 = 27,
184
+ CUBLASLT_MATMUL_TILE_96x128 = 28,
185
+ CUBLASLT_MATMUL_TILE_128x160 = 29,
186
+ CUBLASLT_MATMUL_TILE_160x128 = 30,
187
+ CUBLASLT_MATMUL_TILE_192x128 = 31,
188
+ CUBLASLT_MATMUL_TILE_128x192 = 32,
189
+ CUBLASLT_MATMUL_TILE_128x96 = 33,
190
+ CUBLASLT_MATMUL_TILE_32x256 = 34,
191
+ CUBLASLT_MATMUL_TILE_256x32 = 35,
192
+ CUBLASLT_MATMUL_TILE_END
193
+ } cublasLtMatmulTile_t;
194
+
195
+ /** Size and number of stages in which elements are read into shared memory
196
+ *
197
+ * General order of stages IDs is sorted by stage size first and by number of stages second.
198
+ */
199
+ typedef enum {
200
+ CUBLASLT_MATMUL_STAGES_UNDEFINED = 0,
201
+ CUBLASLT_MATMUL_STAGES_16x1 = 1,
202
+ CUBLASLT_MATMUL_STAGES_16x2 = 2,
203
+ CUBLASLT_MATMUL_STAGES_16x3 = 3,
204
+ CUBLASLT_MATMUL_STAGES_16x4 = 4,
205
+ CUBLASLT_MATMUL_STAGES_16x5 = 5,
206
+ CUBLASLT_MATMUL_STAGES_16x6 = 6,
207
+ CUBLASLT_MATMUL_STAGES_32x1 = 7,
208
+ CUBLASLT_MATMUL_STAGES_32x2 = 8,
209
+ CUBLASLT_MATMUL_STAGES_32x3 = 9,
210
+ CUBLASLT_MATMUL_STAGES_32x4 = 10,
211
+ CUBLASLT_MATMUL_STAGES_32x5 = 11,
212
+ CUBLASLT_MATMUL_STAGES_32x6 = 12,
213
+ CUBLASLT_MATMUL_STAGES_64x1 = 13,
214
+ CUBLASLT_MATMUL_STAGES_64x2 = 14,
215
+ CUBLASLT_MATMUL_STAGES_64x3 = 15,
216
+ CUBLASLT_MATMUL_STAGES_64x4 = 16,
217
+ CUBLASLT_MATMUL_STAGES_64x5 = 17,
218
+ CUBLASLT_MATMUL_STAGES_64x6 = 18,
219
+ CUBLASLT_MATMUL_STAGES_128x1 = 19,
220
+ CUBLASLT_MATMUL_STAGES_128x2 = 20,
221
+ CUBLASLT_MATMUL_STAGES_128x3 = 21,
222
+ CUBLASLT_MATMUL_STAGES_128x4 = 22,
223
+ CUBLASLT_MATMUL_STAGES_128x5 = 23,
224
+ CUBLASLT_MATMUL_STAGES_128x6 = 24,
225
+ CUBLASLT_MATMUL_STAGES_32x10 = 25,
226
+ CUBLASLT_MATMUL_STAGES_8x4 = 26,
227
+ CUBLASLT_MATMUL_STAGES_16x10 = 27,
228
+ CUBLASLT_MATMUL_STAGES_8x5 = 28,
229
+ CUBLASLT_MATMUL_STAGES_8x3 = 31,
230
+ CUBLASLT_MATMUL_STAGES_8xAUTO = 32,
231
+ CUBLASLT_MATMUL_STAGES_16xAUTO = 33,
232
+ CUBLASLT_MATMUL_STAGES_32xAUTO = 34,
233
+ CUBLASLT_MATMUL_STAGES_64xAUTO = 35,
234
+ CUBLASLT_MATMUL_STAGES_128xAUTO = 36,
235
+ CUBLASLT_MATMUL_STAGES_END
236
+ } cublasLtMatmulStages_t;
237
+
238
+ /** Thread Block Cluster size
239
+ *
240
+ * Typically dimensioned similar to cublasLtMatmulTile_t, with the third coordinate unused at this time.
241
+ */
242
+ typedef enum {
243
+ /** Let library pick cluster shape automatically */
244
+ CUBLASLT_CLUSTER_SHAPE_AUTO = 0,
245
+ CUBLASLT_CLUSTER_SHAPE_1x1x1 = 2,
246
+ CUBLASLT_CLUSTER_SHAPE_2x1x1 = 3,
247
+ CUBLASLT_CLUSTER_SHAPE_4x1x1 = 4,
248
+ CUBLASLT_CLUSTER_SHAPE_1x2x1 = 5,
249
+ CUBLASLT_CLUSTER_SHAPE_2x2x1 = 6,
250
+ CUBLASLT_CLUSTER_SHAPE_4x2x1 = 7,
251
+ CUBLASLT_CLUSTER_SHAPE_1x4x1 = 8,
252
+ CUBLASLT_CLUSTER_SHAPE_2x4x1 = 9,
253
+ CUBLASLT_CLUSTER_SHAPE_4x4x1 = 10,
254
+ CUBLASLT_CLUSTER_SHAPE_8x1x1 = 11,
255
+ CUBLASLT_CLUSTER_SHAPE_1x8x1 = 12,
256
+ CUBLASLT_CLUSTER_SHAPE_8x2x1 = 13,
257
+ CUBLASLT_CLUSTER_SHAPE_2x8x1 = 14,
258
+ CUBLASLT_CLUSTER_SHAPE_16x1x1 = 15,
259
+ CUBLASLT_CLUSTER_SHAPE_1x16x1 = 16,
260
+ CUBLASLT_CLUSTER_SHAPE_3x1x1 = 17,
261
+ CUBLASLT_CLUSTER_SHAPE_5x1x1 = 18,
262
+ CUBLASLT_CLUSTER_SHAPE_6x1x1 = 19,
263
+ CUBLASLT_CLUSTER_SHAPE_7x1x1 = 20,
264
+ CUBLASLT_CLUSTER_SHAPE_9x1x1 = 21,
265
+ CUBLASLT_CLUSTER_SHAPE_10x1x1 = 22,
266
+ CUBLASLT_CLUSTER_SHAPE_11x1x1 = 23,
267
+ CUBLASLT_CLUSTER_SHAPE_12x1x1 = 24,
268
+ CUBLASLT_CLUSTER_SHAPE_13x1x1 = 25,
269
+ CUBLASLT_CLUSTER_SHAPE_14x1x1 = 26,
270
+ CUBLASLT_CLUSTER_SHAPE_15x1x1 = 27,
271
+ CUBLASLT_CLUSTER_SHAPE_3x2x1 = 28,
272
+ CUBLASLT_CLUSTER_SHAPE_5x2x1 = 29,
273
+ CUBLASLT_CLUSTER_SHAPE_6x2x1 = 30,
274
+ CUBLASLT_CLUSTER_SHAPE_7x2x1 = 31,
275
+ CUBLASLT_CLUSTER_SHAPE_1x3x1 = 32,
276
+ CUBLASLT_CLUSTER_SHAPE_2x3x1 = 33,
277
+ CUBLASLT_CLUSTER_SHAPE_3x3x1 = 34,
278
+ CUBLASLT_CLUSTER_SHAPE_4x3x1 = 35,
279
+ CUBLASLT_CLUSTER_SHAPE_5x3x1 = 36,
280
+ CUBLASLT_CLUSTER_SHAPE_3x4x1 = 37,
281
+ CUBLASLT_CLUSTER_SHAPE_1x5x1 = 38,
282
+ CUBLASLT_CLUSTER_SHAPE_2x5x1 = 39,
283
+ CUBLASLT_CLUSTER_SHAPE_3x5x1 = 40,
284
+ CUBLASLT_CLUSTER_SHAPE_1x6x1 = 41,
285
+ CUBLASLT_CLUSTER_SHAPE_2x6x1 = 42,
286
+ CUBLASLT_CLUSTER_SHAPE_1x7x1 = 43,
287
+ CUBLASLT_CLUSTER_SHAPE_2x7x1 = 44,
288
+ CUBLASLT_CLUSTER_SHAPE_1x9x1 = 45,
289
+ CUBLASLT_CLUSTER_SHAPE_1x10x1 = 46,
290
+ CUBLASLT_CLUSTER_SHAPE_1x11x1 = 47,
291
+ CUBLASLT_CLUSTER_SHAPE_1x12x1 = 48,
292
+ CUBLASLT_CLUSTER_SHAPE_1x13x1 = 49,
293
+ CUBLASLT_CLUSTER_SHAPE_1x14x1 = 50,
294
+ CUBLASLT_CLUSTER_SHAPE_1x15x1 = 51,
295
+ CUBLASLT_CLUSTER_SHAPE_END
296
+ } cublasLtClusterShape_t;
297
+
298
+ /** Inner size of the kernel
299
+ *
300
+ * Represents various aspects of internal kernel design, that don't impact CUDA grid size but may have other more subtle
301
+ * effects.
302
+ *
303
+ */
304
+ typedef enum {
305
+ CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED = 0,
306
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA884 = 1,
307
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA1684 = 2,
308
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA1688 = 3,
309
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA16816 = 4,
310
+ CUBLASLT_MATMUL_INNER_SHAPE_END
311
+ } cublasLtMatmulInnerShape_t;
312
+
313
+ /** Pointer mode to use for alpha/beta */
314
+ typedef enum {
315
+ /** matches CUBLAS_POINTER_MODE_HOST, pointer targets a single value host memory */
316
+ CUBLASLT_POINTER_MODE_HOST = CUBLAS_POINTER_MODE_HOST,
317
+ /** matches CUBLAS_POINTER_MODE_DEVICE, pointer targets a single value device memory */
318
+ CUBLASLT_POINTER_MODE_DEVICE = CUBLAS_POINTER_MODE_DEVICE,
319
+ /** pointer targets an array in device memory */
320
+ CUBLASLT_POINTER_MODE_DEVICE_VECTOR = 2,
321
+ /** alpha pointer targets an array in device memory, beta is zero. Note:
322
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE is not supported, must be 0. */
323
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO = 3,
324
+ /** alpha pointer targets an array in device memory, beta is a single value in host memory. */
325
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST = 4,
326
+ } cublasLtPointerMode_t;
327
+
328
+ /** Mask to define pointer mode capability */
329
+ typedef enum {
330
+ /** see CUBLASLT_POINTER_MODE_HOST */
331
+ CUBLASLT_POINTER_MODE_MASK_HOST = 1,
332
+ /** see CUBLASLT_POINTER_MODE_DEVICE */
333
+ CUBLASLT_POINTER_MODE_MASK_DEVICE = 2,
334
+ /** see CUBLASLT_POINTER_MODE_DEVICE_VECTOR */
335
+ CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR = 4,
336
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO */
337
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_ZERO = 8,
338
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST */
339
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_HOST = 16,
340
+ } cublasLtPointerModeMask_t;
341
+
342
+ /** Implementation details that may affect numerical behavior of algorithms. */
343
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA (0x01ull << 0)
344
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_HMMA (0x02ull << 0)
345
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_IMMA (0x04ull << 0)
346
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_DMMA (0x08ull << 0)
347
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_TENSOR_OP_MASK (0xfeull << 0)
348
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_TYPE_MASK (0xffull << 0)
349
+
350
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_16F (0x01ull << 8)
351
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F (0x02ull << 8)
352
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_64F (0x04ull << 8)
353
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32I (0x08ull << 8)
354
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_TYPE_MASK (0xffull << 8)
355
+
356
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16F (0x01ull << 16)
357
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16BF (0x02ull << 16)
358
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_TF32 (0x04ull << 16)
359
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F (0x08ull << 16)
360
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_64F (0x10ull << 16)
361
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8I (0x20ull << 16)
362
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E4M3 (0x40ull << 16)
363
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E5M2 (0x80ull << 16)
364
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_INPUT_TYPE_MASK (0xffull << 16)
365
+
366
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN (0x01ull << 32)
367
+ typedef uint64_t cublasLtNumericalImplFlags_t;
368
+
369
+ /** Execute matrix multiplication (D = alpha * op(A) * op(B) + beta * C).
370
+ *
371
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
372
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
373
+ * when workspaceSizeInBytes is less than workspace required by configured
374
+ * algo
375
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
376
+ * operation
377
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
378
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
379
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
380
+ */
381
+ cublasStatus_t CUBLASWINAPI cublasLtMatmul(cublasLtHandle_t lightHandle,
382
+ cublasLtMatmulDesc_t computeDesc,
383
+ const void* alpha, /* host or device pointer */
384
+ const void* A,
385
+ cublasLtMatrixLayout_t Adesc,
386
+ const void* B,
387
+ cublasLtMatrixLayout_t Bdesc,
388
+ const void* beta, /* host or device pointer */
389
+ const void* C,
390
+ cublasLtMatrixLayout_t Cdesc,
391
+ void* D,
392
+ cublasLtMatrixLayout_t Ddesc,
393
+ const cublasLtMatmulAlgo_t* algo,
394
+ void* workspace,
395
+ size_t workspaceSizeInBytes,
396
+ cudaStream_t stream);
397
+
398
+ /** Matrix layout conversion helper (C = alpha * op(A) + beta * op(B))
399
+ *
400
+ * Can be used to change memory order of data or to scale and shift the values.
401
+ *
402
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
403
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
404
+ * when A is not NULL, but Adesc is NULL
405
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
406
+ * operation
407
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
408
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
409
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
410
+ */
411
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransform(cublasLtHandle_t lightHandle,
412
+ cublasLtMatrixTransformDesc_t transformDesc,
413
+ const void* alpha, /* host or device pointer */
414
+ const void* A,
415
+ cublasLtMatrixLayout_t Adesc,
416
+ const void* beta, /* host or device pointer */
417
+ const void* B,
418
+ cublasLtMatrixLayout_t Bdesc,
419
+ void* C,
420
+ cublasLtMatrixLayout_t Cdesc,
421
+ cudaStream_t stream);
422
+
423
+ /* ---------------------------------------------------------------------------------------*/
424
+ /* Helper functions for cublasLtMatrixLayout_t */
425
+ /* ---------------------------------------------------------------------------------------*/
426
+
427
+ /** Enum for data ordering */
428
+ typedef enum {
429
+ /** Column-major
430
+ *
431
+ * Leading dimension is the stride (in elements) to the beginning of next column in memory.
432
+ */
433
+ CUBLASLT_ORDER_COL = 0,
434
+ /** Row major
435
+ *
436
+ * Leading dimension is the stride (in elements) to the beginning of next row in memory.
437
+ */
438
+ CUBLASLT_ORDER_ROW = 1,
439
+ /** Column-major ordered tiles of 32 columns.
440
+ *
441
+ * Leading dimension is the stride (in elements) to the beginning of next group of 32-columns. E.g. if matrix has 33
442
+ * columns and 2 rows, ld must be at least (32) * 2 = 64.
443
+ */
444
+ CUBLASLT_ORDER_COL32 = 2,
445
+ /** Column-major ordered tiles of composite tiles with total 32 columns and 8 rows, tile composed of interleaved
446
+ * inner tiles of 4 columns within 4 even or odd rows in an alternating pattern.
447
+ *
448
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 8 row tile for the next
449
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32 * 8) * 1 = 256.
450
+ */
451
+ CUBLASLT_ORDER_COL4_4R2_8C = 3,
452
+ /** Column-major ordered tiles of composite tiles with total 32 columns ands 32 rows.
453
+ * Element offset within the tile is calculated as (((row%8)/2*4+row/8)*2+row%2)*32+col.
454
+ *
455
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 32 row tile for the next
456
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32*32)*1 = 1024.
457
+ */
458
+ CUBLASLT_ORDER_COL32_2R_4R4 = 4,
459
+
460
+ } cublasLtOrder_t;
461
+
462
+ /** Attributes of memory layout */
463
+ typedef enum {
464
+ /** Data type, see cudaDataType.
465
+ *
466
+ * uint32_t
467
+ */
468
+ CUBLASLT_MATRIX_LAYOUT_TYPE = 0,
469
+
470
+ /** Memory order of the data, see cublasLtOrder_t.
471
+ *
472
+ * int32_t, default: CUBLASLT_ORDER_COL
473
+ */
474
+ CUBLASLT_MATRIX_LAYOUT_ORDER = 1,
475
+
476
+ /** Number of rows.
477
+ *
478
+ * Usually only values that can be expressed as int32_t are supported.
479
+ *
480
+ * uint64_t
481
+ */
482
+ CUBLASLT_MATRIX_LAYOUT_ROWS = 2,
483
+
484
+ /** Number of columns.
485
+ *
486
+ * Usually only values that can be expressed as int32_t are supported.
487
+ *
488
+ * uint64_t
489
+ */
490
+ CUBLASLT_MATRIX_LAYOUT_COLS = 3,
491
+
492
+ /** Matrix leading dimension.
493
+ *
494
+ * For CUBLASLT_ORDER_COL this is stride (in elements) of matrix column, for more details and documentation for
495
+ * other memory orders see documentation for cublasLtOrder_t values.
496
+ *
497
+ * Currently only non-negative values are supported, must be large enough so that matrix memory locations are not
498
+ * overlapping (e.g. greater or equal to CUBLASLT_MATRIX_LAYOUT_ROWS in case of CUBLASLT_ORDER_COL).
499
+ *
500
+ * int64_t;
501
+ */
502
+ CUBLASLT_MATRIX_LAYOUT_LD = 4,
503
+
504
+ /** Number of matmul operations to perform in the batch.
505
+ *
506
+ * See also CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT
507
+ *
508
+ * int32_t, default: 1
509
+ */
510
+ CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT = 5,
511
+
512
+ /** Stride (in elements) to the next matrix for strided batch operation.
513
+ *
514
+ * When matrix type is planar-complex (CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET != 0), batch stride
515
+ * is interpreted by cublasLtMatmul() in number of real valued sub-elements. E.g. for data of type CUDA_C_16F,
516
+ * offset of 1024B is encoded as a stride of value 512 (since each element of the real and imaginary matrices
517
+ * is a 2B (16bit) floating point type).
518
+ *
519
+ * NOTE: A bug in cublasLtMatrixTransform() causes it to interpret the batch stride for a planar-complex matrix
520
+ * as if it was specified in number of complex elements. Therefore an offset of 1024B must be encoded as stride
521
+ * value 256 when calling cublasLtMatrixTransform() (each complex element is 4B with real and imaginary values 2B
522
+ * each). This behavior is expected to be corrected in the next major cuBLAS version.
523
+ *
524
+ * int64_t, default: 0
525
+ */
526
+ CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET = 6,
527
+
528
+ /** Stride (in bytes) to the imaginary plane for planar complex layout.
529
+ *
530
+ * int64_t, default: 0 - 0 means that layout is regular (real and imaginary parts of complex numbers are interleaved
531
+ * in memory in each element)
532
+ */
533
+ CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET = 7,
534
+ } cublasLtMatrixLayoutAttribute_t;
535
+
536
+ /** Internal. Do not use directly.
537
+ */
538
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutInit_internal( //
539
+ cublasLtMatrixLayout_t matLayout,
540
+ size_t size,
541
+ cudaDataType type,
542
+ uint64_t rows,
543
+ uint64_t cols,
544
+ int64_t ld);
545
+
546
+ /** Initialize matrix layout descriptor in pre-allocated space.
547
+ *
548
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
549
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
550
+ */
551
+ static inline cublasStatus_t cublasLtMatrixLayoutInit(
552
+ cublasLtMatrixLayout_t matLayout, cudaDataType type, uint64_t rows, uint64_t cols, int64_t ld) {
553
+ return cublasLtMatrixLayoutInit_internal(matLayout, sizeof(*matLayout), type, rows, cols, ld);
554
+ }
555
+
556
+ /** Create new matrix layout descriptor.
557
+ *
558
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
559
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
560
+ */
561
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutCreate( //
562
+ cublasLtMatrixLayout_t* matLayout,
563
+ cudaDataType type,
564
+ uint64_t rows,
565
+ uint64_t cols,
566
+ int64_t ld);
567
+
568
+ /** Destroy matrix layout descriptor.
569
+ *
570
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
571
+ */
572
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutDestroy(cublasLtMatrixLayout_t matLayout);
573
+
574
+ /** Set matrix layout descriptor attribute.
575
+ *
576
+ * \param[in] matLayout The descriptor
577
+ * \param[in] attr The attribute
578
+ * \param[in] buf memory address containing the new value
579
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
580
+ *
581
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
582
+ * selected attribute
583
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
584
+ */
585
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutSetAttribute( //
586
+ cublasLtMatrixLayout_t matLayout,
587
+ cublasLtMatrixLayoutAttribute_t attr,
588
+ const void* buf,
589
+ size_t sizeInBytes);
590
+
591
+ /** Get matrix layout descriptor attribute.
592
+ *
593
+ * \param[in] matLayout The descriptor
594
+ * \param[in] attr The attribute
595
+ * \param[out] buf memory address containing the new value
596
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
597
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
598
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
599
+ *
600
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
601
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
602
+ * selected attribute
603
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
604
+ */
605
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutGetAttribute( //
606
+ cublasLtMatrixLayout_t matLayout,
607
+ cublasLtMatrixLayoutAttribute_t attr,
608
+ void* buf,
609
+ size_t sizeInBytes,
610
+ size_t* sizeWritten);
611
+
612
+ /* ---------------------------------------------------------------------------------------*/
613
+ /* Helper functions for cublasLtMatmulDesc_t */
614
+ /* ---------------------------------------------------------------------------------------*/
615
+
616
+ /** Matmul descriptor attributes to define details of the operation. */
617
+ typedef enum {
618
+ /** Compute type, see cudaDataType. Defines data type used for multiply and accumulate operations and the
619
+ * accumulator during matrix multiplication.
620
+ *
621
+ * int32_t
622
+ */
623
+ CUBLASLT_MATMUL_DESC_COMPUTE_TYPE = 0,
624
+
625
+ /** Scale type, see cudaDataType. Defines data type of alpha and beta. Accumulator and value from matrix C are
626
+ * typically converted to scale type before final scaling. Value is then converted from scale type to type of matrix
627
+ * D before being stored in memory.
628
+ *
629
+ * int32_t, default: same as CUBLASLT_MATMUL_DESC_COMPUTE_TYPE
630
+ */
631
+ CUBLASLT_MATMUL_DESC_SCALE_TYPE = 1,
632
+
633
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t. When CUBLASLT_POINTER_MODE_DEVICE_VECTOR is in use,
634
+ * alpha/beta vector lenghts must match number of output matrix rows.
635
+ *
636
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
637
+ */
638
+ CUBLASLT_MATMUL_DESC_POINTER_MODE = 2,
639
+
640
+ /** Transform of matrix A, see cublasOperation_t.
641
+ *
642
+ * int32_t, default: CUBLAS_OP_N
643
+ */
644
+ CUBLASLT_MATMUL_DESC_TRANSA = 3,
645
+
646
+ /** Transform of matrix B, see cublasOperation_t.
647
+ *
648
+ * int32_t, default: CUBLAS_OP_N
649
+ */
650
+ CUBLASLT_MATMUL_DESC_TRANSB = 4,
651
+
652
+ /** Transform of matrix C, see cublasOperation_t.
653
+ *
654
+ * Currently only CUBLAS_OP_N is supported.
655
+ *
656
+ * int32_t, default: CUBLAS_OP_N
657
+ */
658
+ CUBLASLT_MATMUL_DESC_TRANSC = 5,
659
+
660
+ /** Matrix fill mode, see cublasFillMode_t.
661
+ *
662
+ * int32_t, default: CUBLAS_FILL_MODE_FULL
663
+ */
664
+ CUBLASLT_MATMUL_DESC_FILL_MODE = 6,
665
+
666
+ /** Epilogue function, see cublasLtEpilogue_t.
667
+ *
668
+ * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT
669
+ */
670
+ CUBLASLT_MATMUL_DESC_EPILOGUE = 7,
671
+
672
+ /** Bias or bias gradient vector pointer in the device memory.
673
+ *
674
+ * Bias case. See CUBLASLT_EPILOGUE_BIAS.
675
+ * For bias data type see CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE.
676
+ *
677
+ * Bias vector length must match matrix D rows count.
678
+ *
679
+ * Bias gradient case. See CUBLASLT_EPILOGUE_DRELU_BGRAD and CUBLASLT_EPILOGUE_DGELU_BGRAD.
680
+ * Bias gradient vector elements are the same type as the output elements
681
+ * (Ctype) with the exception of IMMA kernels (see above).
682
+ *
683
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
684
+ * depend on its value to determine expected pointer alignment.
685
+ *
686
+ * Bias case: const void *, default: NULL
687
+ * Bias gradient case: void *, default: NULL
688
+ */
689
+ CUBLASLT_MATMUL_DESC_BIAS_POINTER = 8,
690
+
691
+ /** Batch stride for bias or bias gradient vector.
692
+ *
693
+ * Used together with CUBLASLT_MATMUL_DESC_BIAS_POINTER when matrix D's CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1.
694
+ *
695
+ * int64_t, default: 0
696
+ */
697
+ CUBLASLT_MATMUL_DESC_BIAS_BATCH_STRIDE = 10,
698
+
699
+ /** Pointer for epilogue auxiliary buffer.
700
+ *
701
+ * - Output vector for ReLu bit-mask in forward pass when CUBLASLT_EPILOGUE_RELU_AUX
702
+ * or CUBLASLT_EPILOGUE_RELU_AUX_BIAS epilogue is used.
703
+ * - Input vector for ReLu bit-mask in backward pass when
704
+ * CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is used.
705
+ *
706
+ * - Output of GELU input matrix in forward pass when
707
+ * CUBLASLT_EPILOGUE_GELU_AUX_BIAS epilogue is used.
708
+ * - Input of GELU input matrix for backward pass when
709
+ * CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
710
+ *
711
+ * For aux data type see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE.
712
+ *
713
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
714
+ * depend on its value to determine expected pointer alignment.
715
+ *
716
+ * Requires setting CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD attribute.
717
+ *
718
+ * Forward pass: void *, default: NULL
719
+ * Backward pass: const void *, default: NULL
720
+ */
721
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER = 11,
722
+
723
+ /** Leading dimension for epilogue auxiliary buffer.
724
+ *
725
+ * - ReLu bit-mask matrix leading dimension in elements (i.e. bits)
726
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
727
+ * used. Must be divisible by 128 and be no less than the number of rows in the output matrix.
728
+ *
729
+ * - GELU input matrix leading dimension in elements
730
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
731
+ * Must be divisible by 8 and be no less than the number of rows in the output matrix.
732
+ *
733
+ * int64_t, default: 0
734
+ */
735
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD = 12,
736
+
737
+ /** Batch stride for epilogue auxiliary buffer.
738
+ *
739
+ * - ReLu bit-mask matrix batch stride in elements (i.e. bits)
740
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
741
+ * used. Must be divisible by 128.
742
+ *
743
+ * - GELU input matrix batch stride in elements
744
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
745
+ * Must be divisible by 8.
746
+ *
747
+ * int64_t, default: 0
748
+ */
749
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_BATCH_STRIDE = 13,
750
+
751
+ /** Batch stride for alpha vector.
752
+ *
753
+ * Used together with CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST when matrix D's
754
+ * CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. If CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO is set then
755
+ * CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE must be set to 0 as this mode doesnt supported batched alpha vector.
756
+ *
757
+ * int64_t, default: 0
758
+ */
759
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE = 14,
760
+
761
+ /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs
762
+ * when user expects a concurrent stream to be using some of the device resources.
763
+ *
764
+ * int32_t, default: 0 - use the number reported by the device.
765
+ */
766
+ CUBLASLT_MATMUL_DESC_SM_COUNT_TARGET = 15,
767
+
768
+ /** Device pointer to the scale factor value that converts data in matrix A to the compute data type range.
769
+ *
770
+ * The scaling factor value must have the same type as the compute type.
771
+ *
772
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
773
+ *
774
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
775
+ * will return CUBLAS_INVALID_VALUE.
776
+ *
777
+ * const void *, default: NULL
778
+ */
779
+ CUBLASLT_MATMUL_DESC_A_SCALE_POINTER = 17,
780
+
781
+ /** Device pointer to the scale factor value to convert data in matrix B to compute data type range.
782
+ *
783
+ * The scaling factor value must have the same type as the compute type.
784
+ *
785
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
786
+ *
787
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
788
+ * will return CUBLAS_INVALID_VALUE.
789
+ *
790
+ * const void *, default: NULL
791
+ */
792
+ CUBLASLT_MATMUL_DESC_B_SCALE_POINTER = 18,
793
+
794
+ /** Device pointer to the scale factor value to convert data in matrix C to compute data type range.
795
+ *
796
+ * The scaling factor value must have the same type as the compute type.
797
+ *
798
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
799
+ *
800
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
801
+ * will return CUBLAS_INVALID_VALUE.
802
+ *
803
+ * const void *, default: NULL
804
+ */
805
+ CUBLASLT_MATMUL_DESC_C_SCALE_POINTER = 19,
806
+
807
+ /** Device pointer to the scale factor value to convert data in matrix D to compute data type range.
808
+ *
809
+ * The scaling factor value must have the same type as the compute type.
810
+ *
811
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
812
+ *
813
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
814
+ * will return CUBLAS_INVALID_VALUE.
815
+ *
816
+ * const void *, default: NULL
817
+ */
818
+ CUBLASLT_MATMUL_DESC_D_SCALE_POINTER = 20,
819
+
820
+ /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
821
+ * output matrix.
822
+ *
823
+ * The computed value has the same type as the compute type.
824
+ *
825
+ * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
826
+ * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_INVALID_VALUE.
827
+ *
828
+ * void *, default: NULL
829
+ */
830
+ CUBLASLT_MATMUL_DESC_AMAX_D_POINTER = 21,
831
+
832
+ /** Type of the data to be stored to the memory pointed to by CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
833
+ *
834
+ * If unset, the data type defaults to the type of elements of the output matrix with some exceptions, see details
835
+ * below.
836
+ *
837
+ * ReLu uses a bit-mask.
838
+ *
839
+ * GELU input matrix elements type is the same as the type of elements of
840
+ * the output matrix with some exceptions, see details below.
841
+ *
842
+ * For fp8 kernels with output type CUDA_R_8F_E4M3 the aux data type can be CUDA_R_8F_E4M3 or CUDA_R_16F with some
843
+ * restrictions. See https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmulDescAttributes_t for more details.
844
+ *
845
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
846
+ * will return CUBLAS_INVALID_VALUE.
847
+ *
848
+ * int32_t based on cudaDataType, default: -1
849
+ */
850
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE = 22,
851
+
852
+ /** Device pointer to the scaling factor value to convert results from compute type data range to storage
853
+ * data range in the auxiliary matrix that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
854
+ *
855
+ * The scaling factor value must have the same type as the compute type.
856
+ *
857
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1. If set for an unsupported matrix data,
858
+ * scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_INVALID_VALUE.
859
+ *
860
+ * void *, default: NULL
861
+ */
862
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_SCALE_POINTER = 23,
863
+
864
+ /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
865
+ * buffer that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
866
+ *
867
+ * The computed value has the same type as the compute type.
868
+ *
869
+ * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
870
+ * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_INVALID_VALUE.
871
+ *
872
+ * void *, default: NULL
873
+ */
874
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_AMAX_POINTER = 24,
875
+
876
+ /** Flag for managing fp8 fast accumulation mode.
877
+ * When enabled, problem execution might be faster but at the cost of lower accuracy because intermediate results
878
+ * will not periodically be promoted to a higher precision.
879
+ *
880
+ * int8_t, default: 0 - fast accumulation mode is disabled.
881
+ */
882
+ CUBLASLT_MATMUL_DESC_FAST_ACCUM = 25,
883
+
884
+ /** Type of bias or bias gradient vector in the device memory.
885
+ *
886
+ * Bias case: see CUBLASLT_EPILOGUE_BIAS.
887
+ *
888
+ * Bias vector elements are the same type as the elements of output matrix (Dtype) with the following exceptions:
889
+ * - IMMA kernels with computeType=CUDA_R_32I and Ctype=CUDA_R_8I where the bias vector elements
890
+ * are the same type as alpha, beta (CUBLASLT_MATMUL_DESC_SCALE_TYPE=CUDA_R_32F)
891
+ * - fp8 kernels with an output type of CUDA_R_32F, CUDA_R_8F_E4M3 or CUDA_R_8F_E5M2, See
892
+ * https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmul for details.
893
+ *
894
+ * int32_t based on cudaDataType, default: -1
895
+ */
896
+ CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE = 26,
897
+ } cublasLtMatmulDescAttributes_t;
898
+
899
+ /** Internal. Do not use directly.
900
+ */
901
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescInit_internal( //
902
+ cublasLtMatmulDesc_t matmulDesc,
903
+ size_t size,
904
+ cublasComputeType_t computeType,
905
+ cudaDataType_t scaleType);
906
+
907
+ /** Initialize matmul operation descriptor in pre-allocated space.
908
+ *
909
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
910
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was initialized successfully
911
+ */
912
+ static inline cublasStatus_t cublasLtMatmulDescInit( //
913
+ cublasLtMatmulDesc_t matmulDesc,
914
+ cublasComputeType_t computeType,
915
+ cudaDataType_t scaleType) {
916
+ return cublasLtMatmulDescInit_internal(matmulDesc, sizeof(*matmulDesc), computeType, scaleType);
917
+ }
918
+
919
+ /** Create new matmul operation descriptor.
920
+ *
921
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
922
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
923
+ */
924
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescCreate(cublasLtMatmulDesc_t* matmulDesc,
925
+ cublasComputeType_t computeType,
926
+ cudaDataType_t scaleType);
927
+
928
+ /** Destroy matmul operation descriptor.
929
+ *
930
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
931
+ */
932
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescDestroy(cublasLtMatmulDesc_t matmulDesc);
933
+
934
+ /** Set matmul operation descriptor attribute.
935
+ *
936
+ * \param[in] matmulDesc The descriptor
937
+ * \param[in] attr The attribute
938
+ * \param[in] buf memory address containing the new value
939
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
940
+ *
941
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
942
+ * selected attribute
943
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
944
+ */
945
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescSetAttribute( //
946
+ cublasLtMatmulDesc_t matmulDesc,
947
+ cublasLtMatmulDescAttributes_t attr,
948
+ const void* buf,
949
+ size_t sizeInBytes);
950
+
951
+ /** Get matmul operation descriptor attribute.
952
+ *
953
+ * \param[in] matmulDesc The descriptor
954
+ * \param[in] attr The attribute
955
+ * \param[out] buf memory address containing the new value
956
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
957
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
958
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
959
+ *
960
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
961
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
962
+ * selected attribute
963
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
964
+ */
965
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescGetAttribute( //
966
+ cublasLtMatmulDesc_t matmulDesc,
967
+ cublasLtMatmulDescAttributes_t attr,
968
+ void* buf,
969
+ size_t sizeInBytes,
970
+ size_t* sizeWritten);
971
+
972
+ /* ---------------------------------------------------------------------------------------*/
973
+ /* Helper functions for cublasLtMatrixTransformDesc_t */
974
+ /* ---------------------------------------------------------------------------------------*/
975
+
976
+ /** Matrix transform descriptor attributes to define details of the operation.
977
+ */
978
+ typedef enum {
979
+ /** Scale type, see cudaDataType. Inputs are converted to scale type for scaling and summation and results are then
980
+ * converted to output type to store in memory.
981
+ *
982
+ * int32_t
983
+ */
984
+ CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE,
985
+
986
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t.
987
+ *
988
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
989
+ */
990
+ CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE,
991
+
992
+ /** Transform of matrix A, see cublasOperation_t.
993
+ *
994
+ * int32_t, default: CUBLAS_OP_N
995
+ */
996
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA,
997
+
998
+ /** Transform of matrix B, see cublasOperation_t.
999
+ *
1000
+ * int32_t, default: CUBLAS_OP_N
1001
+ */
1002
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSB,
1003
+ } cublasLtMatrixTransformDescAttributes_t;
1004
+
1005
+ /** Internal. Do not use directly.
1006
+ */
1007
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescInit_internal(cublasLtMatrixTransformDesc_t transformDesc,
1008
+ size_t size,
1009
+ cudaDataType scaleType);
1010
+
1011
+ /** Initialize matrix transform operation descriptor in pre-allocated space.
1012
+ *
1013
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1014
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
1015
+ */
1016
+ static inline cublasStatus_t cublasLtMatrixTransformDescInit(cublasLtMatrixTransformDesc_t transformDesc,
1017
+ cudaDataType scaleType) {
1018
+ return cublasLtMatrixTransformDescInit_internal(transformDesc, sizeof(*transformDesc), scaleType);
1019
+ }
1020
+
1021
+ /** Create new matrix transform operation descriptor.
1022
+ *
1023
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
1024
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
1025
+ */
1026
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescCreate(cublasLtMatrixTransformDesc_t* transformDesc,
1027
+ cudaDataType scaleType);
1028
+
1029
+ /** Destroy matrix transform operation descriptor.
1030
+ *
1031
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1032
+ */
1033
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescDestroy(cublasLtMatrixTransformDesc_t transformDesc);
1034
+
1035
+ /** Set matrix transform operation descriptor attribute.
1036
+ *
1037
+ * \param[in] transformDesc The descriptor
1038
+ * \param[in] attr The attribute
1039
+ * \param[in] buf memory address containing the new value
1040
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1041
+ *
1042
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1043
+ * selected attribute
1044
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1045
+ */
1046
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescSetAttribute( //
1047
+ cublasLtMatrixTransformDesc_t transformDesc,
1048
+ cublasLtMatrixTransformDescAttributes_t attr,
1049
+ const void* buf,
1050
+ size_t sizeInBytes);
1051
+
1052
+ /** Get matrix transform operation descriptor attribute.
1053
+ *
1054
+ * \param[in] transformDesc The descriptor
1055
+ * \param[in] attr The attribute
1056
+ * \param[out] buf memory address containing the new value
1057
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1058
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number
1059
+ * of bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1060
+ *
1061
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1062
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1063
+ * selected attribute
1064
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1065
+ */
1066
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescGetAttribute( //
1067
+ cublasLtMatrixTransformDesc_t transformDesc,
1068
+ cublasLtMatrixTransformDescAttributes_t attr,
1069
+ void* buf,
1070
+ size_t sizeInBytes,
1071
+ size_t* sizeWritten);
1072
+
1073
+ /** Reduction scheme for portions of the dot-product calculated in parallel (a. k. a. "split - K").
1074
+ */
1075
+ typedef enum {
1076
+ /** No reduction scheme, dot-product shall be performed in one sequence.
1077
+ */
1078
+ CUBLASLT_REDUCTION_SCHEME_NONE = 0,
1079
+
1080
+ /** Reduction is performed "in place" - using the output buffer (and output data type) and counters (in workspace) to
1081
+ * guarantee the sequentiality.
1082
+ */
1083
+ CUBLASLT_REDUCTION_SCHEME_INPLACE = 1,
1084
+
1085
+ /** Intermediate results are stored in compute type in the workspace and reduced in a separate step.
1086
+ */
1087
+ CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE = 2,
1088
+
1089
+ /** Intermediate results are stored in output type in the workspace and reduced in a separate step.
1090
+ */
1091
+ CUBLASLT_REDUCTION_SCHEME_OUTPUT_TYPE = 4,
1092
+
1093
+ CUBLASLT_REDUCTION_SCHEME_MASK = 0x7,
1094
+ } cublasLtReductionScheme_t;
1095
+
1096
+ /** Postprocessing options for the epilogue
1097
+ */
1098
+ typedef enum {
1099
+ /** No special postprocessing, just scale and quantize results if necessary.
1100
+ */
1101
+ CUBLASLT_EPILOGUE_DEFAULT = 1,
1102
+
1103
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
1104
+ */
1105
+ CUBLASLT_EPILOGUE_RELU = 2,
1106
+
1107
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
1108
+ *
1109
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
1110
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1111
+ */
1112
+ CUBLASLT_EPILOGUE_RELU_AUX = (CUBLASLT_EPILOGUE_RELU | 128),
1113
+
1114
+ /** Bias, apply (broadcasted) Bias from bias vector. Bias vector length must match matrix D rows, it must be packed
1115
+ * (stride between vector elements is 1). Bias vector is broadcasted to all columns and added before applying final
1116
+ * postprocessing.
1117
+ */
1118
+ CUBLASLT_EPILOGUE_BIAS = 4,
1119
+
1120
+ /** ReLu and Bias, apply Bias and then ReLu transform
1121
+ */
1122
+ CUBLASLT_EPILOGUE_RELU_BIAS = (CUBLASLT_EPILOGUE_RELU | CUBLASLT_EPILOGUE_BIAS),
1123
+
1124
+ /** ReLu and Bias, apply Bias and then ReLu transform
1125
+ *
1126
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
1127
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1128
+ */
1129
+ CUBLASLT_EPILOGUE_RELU_AUX_BIAS = (CUBLASLT_EPILOGUE_RELU_AUX | CUBLASLT_EPILOGUE_BIAS),
1130
+
1131
+ /* ReLu gradient. Apply ReLu gradient to matmul output. Store ReLu gradient in the output matrix.
1132
+ *
1133
+ * This epilogue mode requires an extra input,
1134
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1135
+ */
1136
+ CUBLASLT_EPILOGUE_DRELU = 8 | 128,
1137
+
1138
+ /* ReLu and Bias gradients. Apply independently ReLu and Bias gradient to
1139
+ * matmul output. Store ReLu gradient in the output matrix, and Bias gradient
1140
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1141
+ *
1142
+ * This epilogue mode requires an extra input,
1143
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1144
+ */
1145
+ CUBLASLT_EPILOGUE_DRELU_BGRAD = CUBLASLT_EPILOGUE_DRELU | 16,
1146
+
1147
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
1148
+ */
1149
+ CUBLASLT_EPILOGUE_GELU = 32,
1150
+
1151
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
1152
+ *
1153
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
1154
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1155
+ */
1156
+ CUBLASLT_EPILOGUE_GELU_AUX = (CUBLASLT_EPILOGUE_GELU | 128),
1157
+
1158
+ /** GELU and Bias, apply Bias and then GELU transform
1159
+ */
1160
+ CUBLASLT_EPILOGUE_GELU_BIAS = (CUBLASLT_EPILOGUE_GELU | CUBLASLT_EPILOGUE_BIAS),
1161
+
1162
+ /** GELU and Bias, apply Bias and then GELU transform
1163
+ *
1164
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
1165
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1166
+ */
1167
+ CUBLASLT_EPILOGUE_GELU_AUX_BIAS = (CUBLASLT_EPILOGUE_GELU_AUX | CUBLASLT_EPILOGUE_BIAS),
1168
+
1169
+ /* GELU gradient. Apply GELU gradient to matmul output. Store GELU gradient in the output matrix.
1170
+ *
1171
+ * This epilogue mode requires an extra input,
1172
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1173
+ */
1174
+ CUBLASLT_EPILOGUE_DGELU = 64 | 128,
1175
+
1176
+ /* GELU and Bias gradients. Apply independently GELU and Bias gradient to
1177
+ * matmul output. Store GELU gradient in the output matrix, and Bias gradient
1178
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1179
+ *
1180
+ * This epilogue mode requires an extra input,
1181
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1182
+ */
1183
+ CUBLASLT_EPILOGUE_DGELU_BGRAD = CUBLASLT_EPILOGUE_DGELU | 16,
1184
+
1185
+ /** Bias gradient based on the input matrix A.
1186
+ *
1187
+ * The bias size corresponds to the number of rows of the matrix D.
1188
+ * The reduction happens over the GEMM's "k" dimension.
1189
+ *
1190
+ * Stores Bias gradient in the auxiliary output
1191
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1192
+ */
1193
+ CUBLASLT_EPILOGUE_BGRADA = 256,
1194
+
1195
+ /** Bias gradient based on the input matrix B.
1196
+ *
1197
+ * The bias size corresponds to the number of columns of the matrix D.
1198
+ * The reduction happens over the GEMM's "k" dimension.
1199
+ *
1200
+ * Stores Bias gradient in the auxiliary output
1201
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1202
+ */
1203
+ CUBLASLT_EPILOGUE_BGRADB = 512,
1204
+ } cublasLtEpilogue_t;
1205
+
1206
+ /** Matmul heuristic search mode
1207
+ */
1208
+ typedef enum {
1209
+ /** ask heuristics for best algo for given usecase
1210
+ */
1211
+ CUBLASLT_SEARCH_BEST_FIT = 0,
1212
+ /** only try to find best config for preconfigured algo id
1213
+ */
1214
+ CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID = 1,
1215
+ /** reserved for future use
1216
+ */
1217
+ CUBLASLT_SEARCH_RESERVED_02 = 2,
1218
+ /** reserved for future use
1219
+ */
1220
+ CUBLASLT_SEARCH_RESERVED_03 = 3,
1221
+ /** reserved for future use
1222
+ */
1223
+ CUBLASLT_SEARCH_RESERVED_04 = 4,
1224
+ /** reserved for future use
1225
+ */
1226
+ CUBLASLT_SEARCH_RESERVED_05 = 5,
1227
+ } cublasLtMatmulSearch_t;
1228
+
1229
+ /** Algo search preference to fine tune the heuristic function. */
1230
+ typedef enum {
1231
+ /** Search mode, see cublasLtMatmulSearch_t.
1232
+ *
1233
+ * uint32_t, default: CUBLASLT_SEARCH_BEST_FIT
1234
+ */
1235
+ CUBLASLT_MATMUL_PREF_SEARCH_MODE = 0,
1236
+
1237
+ /** Maximum allowed workspace size in bytes.
1238
+ *
1239
+ * uint64_t, default: 0 - no workspace allowed
1240
+ */
1241
+ CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES = 1,
1242
+
1243
+ /** Reduction scheme mask, see cublasLtReductionScheme_t. Filters heuristic result to only include algo configs that
1244
+ * use one of the required modes.
1245
+ *
1246
+ * E.g. mask value of 0x03 will allow only INPLACE and COMPUTE_TYPE reduction schemes.
1247
+ *
1248
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_MASK (allows all reduction schemes)
1249
+ */
1250
+ CUBLASLT_MATMUL_PREF_REDUCTION_SCHEME_MASK = 3,
1251
+
1252
+ /** Minimum buffer alignment for matrix A (in bytes).
1253
+ *
1254
+ * Selecting a smaller value will exclude algorithms that can not work with matrix A that is not as strictly aligned
1255
+ * as they need.
1256
+ *
1257
+ * uint32_t, default: 256
1258
+ */
1259
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES = 5,
1260
+
1261
+ /** Minimum buffer alignment for matrix B (in bytes).
1262
+ *
1263
+ * Selecting a smaller value will exclude algorithms that can not work with matrix B that is not as strictly aligned
1264
+ * as they need.
1265
+ *
1266
+ * uint32_t, default: 256
1267
+ */
1268
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES = 6,
1269
+
1270
+ /** Minimum buffer alignment for matrix C (in bytes).
1271
+ *
1272
+ * Selecting a smaller value will exclude algorithms that can not work with matrix C that is not as strictly aligned
1273
+ * as they need.
1274
+ *
1275
+ * uint32_t, default: 256
1276
+ */
1277
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES = 7,
1278
+
1279
+ /** Minimum buffer alignment for matrix D (in bytes).
1280
+ *
1281
+ * Selecting a smaller value will exclude algorithms that can not work with matrix D that is not as strictly aligned
1282
+ * as they need.
1283
+ *
1284
+ * uint32_t, default: 256
1285
+ */
1286
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_D_BYTES = 8,
1287
+
1288
+ /** Maximum wave count.
1289
+ *
1290
+ * See cublasLtMatmulHeuristicResult_t::wavesCount.
1291
+ *
1292
+ * Selecting a non-zero value will exclude algorithms that report device utilization higher than specified.
1293
+ *
1294
+ * float, default: 0.0f
1295
+ */
1296
+ CUBLASLT_MATMUL_PREF_MAX_WAVES_COUNT = 9,
1297
+
1298
+ /** Numerical implementation details mask, see cublasLtNumericalImplFlags_t. Filters heuristic result to only include
1299
+ * algorithms that use the allowed implementations.
1300
+ *
1301
+ * uint64_t, default: uint64_t(-1) (allow everything)
1302
+ */
1303
+ CUBLASLT_MATMUL_PREF_IMPL_MASK = 12,
1304
+ } cublasLtMatmulPreferenceAttributes_t;
1305
+
1306
+ /** Internal. Do not use directly.
1307
+ */
1308
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceInit_internal(cublasLtMatmulPreference_t pref, size_t size);
1309
+
1310
+ /** Initialize matmul heuristic search preference descriptor in pre-allocated space.
1311
+ *
1312
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1313
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
1314
+ */
1315
+ static inline cublasStatus_t cublasLtMatmulPreferenceInit(cublasLtMatmulPreference_t pref) {
1316
+ return cublasLtMatmulPreferenceInit_internal(pref, sizeof(*pref));
1317
+ }
1318
+
1319
+ /** Create new matmul heuristic search preference descriptor.
1320
+ *
1321
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
1322
+ * \retval CUBLAS_STATUS_SUCCESS if desciptor was created successfully
1323
+ */
1324
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceCreate(cublasLtMatmulPreference_t* pref);
1325
+
1326
+ /** Destroy matmul heuristic search preference descriptor.
1327
+ *
1328
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1329
+ */
1330
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceDestroy(cublasLtMatmulPreference_t pref);
1331
+
1332
+ /** Set matmul heuristic search preference descriptor attribute.
1333
+ *
1334
+ * \param[in] pref The descriptor
1335
+ * \param[in] attr The attribute
1336
+ * \param[in] buf memory address containing the new value
1337
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1338
+ *
1339
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1340
+ * selected attribute
1341
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1342
+ */
1343
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceSetAttribute( //
1344
+ cublasLtMatmulPreference_t pref,
1345
+ cublasLtMatmulPreferenceAttributes_t attr,
1346
+ const void* buf,
1347
+ size_t sizeInBytes);
1348
+
1349
+ /** Get matmul heuristic search preference descriptor attribute.
1350
+ *
1351
+ * \param[in] pref The descriptor
1352
+ * \param[in] attr The attribute
1353
+ * \param[out] buf memory address containing the new value
1354
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1355
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1356
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1357
+ *
1358
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1359
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1360
+ * selected attribute
1361
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1362
+ */
1363
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceGetAttribute( //
1364
+ cublasLtMatmulPreference_t pref,
1365
+ cublasLtMatmulPreferenceAttributes_t attr,
1366
+ void* buf,
1367
+ size_t sizeInBytes,
1368
+ size_t* sizeWritten);
1369
+
1370
+ /** Results structure used by cublasLtMatmulGetAlgo.
1371
+ *
1372
+ * Holds returned configured algo descriptor and its runtime properties.
1373
+ */
1374
+ typedef struct {
1375
+ /** Matmul algorithm descriptor.
1376
+ *
1377
+ * Must be initialized with cublasLtMatmulAlgoInit() if preferences' CUBLASLT_MATMUL_PERF_SEARCH_MODE is set to
1378
+ * CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID
1379
+ */
1380
+ cublasLtMatmulAlgo_t algo;
1381
+
1382
+ /** Actual size of workspace memory required.
1383
+ */
1384
+ size_t workspaceSize;
1385
+
1386
+ /** Result status, other fields are only valid if after call to cublasLtMatmulAlgoGetHeuristic() this member is set to
1387
+ * CUBLAS_STATUS_SUCCESS.
1388
+ */
1389
+ cublasStatus_t state;
1390
+
1391
+ /** Waves count - a device utilization metric.
1392
+ *
1393
+ * wavesCount value of 1.0f suggests that when kernel is launched it will fully occupy the GPU.
1394
+ */
1395
+ float wavesCount;
1396
+
1397
+ int reserved[4];
1398
+ } cublasLtMatmulHeuristicResult_t;
1399
+
1400
+ /** Query cublasLt heuristic for algorithm appropriate for given use case.
1401
+ *
1402
+ * \param[in] lightHandle Pointer to the allocated cuBLASLt handle for the cuBLASLt
1403
+ * context. See cublasLtHandle_t.
1404
+ * \param[in] operationDesc Handle to the matrix multiplication descriptor.
1405
+ * \param[in] Adesc Handle to the layout descriptors for matrix A.
1406
+ * \param[in] Bdesc Handle to the layout descriptors for matrix B.
1407
+ * \param[in] Cdesc Handle to the layout descriptors for matrix C.
1408
+ * \param[in] Ddesc Handle to the layout descriptors for matrix D.
1409
+ * \param[in] preference Pointer to the structure holding the heuristic search
1410
+ * preferences descriptor. See cublasLtMatrixLayout_t.
1411
+ * \param[in] requestedAlgoCount Size of heuristicResultsArray (in elements) and requested
1412
+ * maximum number of algorithms to return.
1413
+ * \param[in, out] heuristicResultsArray Output algorithms and associated runtime characteristics,
1414
+ * ordered in increasing estimated compute time.
1415
+ * \param[out] returnAlgoCount The number of heuristicResultsArray elements written.
1416
+ *
1417
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
1418
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if no heuristic function available for current configuration
1419
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect
1420
+ * heuristicResultsArray[0 to (returnAlgoCount - 1)].state
1421
+ * for detail status of results
1422
+ */
1423
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetHeuristic(cublasLtHandle_t lightHandle,
1424
+ cublasLtMatmulDesc_t operationDesc,
1425
+ cublasLtMatrixLayout_t Adesc,
1426
+ cublasLtMatrixLayout_t Bdesc,
1427
+ cublasLtMatrixLayout_t Cdesc,
1428
+ cublasLtMatrixLayout_t Ddesc,
1429
+ cublasLtMatmulPreference_t preference,
1430
+ int requestedAlgoCount,
1431
+ cublasLtMatmulHeuristicResult_t heuristicResultsArray[],
1432
+ int* returnAlgoCount);
1433
+
1434
+ /* ---------------------------------------------------------------------------------------*/
1435
+ /* Lower level API to be able to implement own Heuristic and Find routines */
1436
+ /* ---------------------------------------------------------------------------------------*/
1437
+
1438
+ /** Routine to get all algo IDs that can potentially run
1439
+ *
1440
+ * \param[in] int requestedAlgoCount requested number of algos (must be less or equal to size of algoIdsA
1441
+ * (in elements)) \param[out] algoIdsA array to write algoIds to \param[out] returnAlgoCount number of algoIds
1442
+ * actually written
1443
+ *
1444
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
1445
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect returnAlgoCount to get actual number of IDs
1446
+ * available
1447
+ */
1448
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetIds(cublasLtHandle_t lightHandle,
1449
+ cublasComputeType_t computeType,
1450
+ cudaDataType_t scaleType,
1451
+ cudaDataType_t Atype,
1452
+ cudaDataType_t Btype,
1453
+ cudaDataType_t Ctype,
1454
+ cudaDataType_t Dtype,
1455
+ int requestedAlgoCount,
1456
+ int algoIdsArray[],
1457
+ int* returnAlgoCount);
1458
+
1459
+ /** Initialize algo structure
1460
+ *
1461
+ * \retval CUBLAS_STATUS_INVALID_VALUE if algo is NULL or algoId is outside of recognized range
1462
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algoId is not supported for given combination of data types
1463
+ * \retval CUBLAS_STATUS_SUCCESS if the structure was successfully initialized
1464
+ */
1465
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoInit(cublasLtHandle_t lightHandle,
1466
+ cublasComputeType_t computeType,
1467
+ cudaDataType_t scaleType,
1468
+ cudaDataType_t Atype,
1469
+ cudaDataType_t Btype,
1470
+ cudaDataType_t Ctype,
1471
+ cudaDataType_t Dtype,
1472
+ int algoId,
1473
+ cublasLtMatmulAlgo_t* algo);
1474
+
1475
+ /** Check configured algo descriptor for correctness and support on current device.
1476
+ *
1477
+ * Result includes required workspace size and calculated wave count.
1478
+ *
1479
+ * CUBLAS_STATUS_SUCCESS doesn't fully guarantee algo will run (will fail if e.g. buffers are not correctly aligned);
1480
+ * but if cublasLtMatmulAlgoCheck fails, the algo will not run.
1481
+ *
1482
+ * \param[in] algo algo configuration to check
1483
+ * \param[out] result result structure to report algo runtime characteristics; algo field is never updated
1484
+ *
1485
+ * \retval CUBLAS_STATUS_INVALID_VALUE if matrix layout descriptors or operation descriptor don't match algo
1486
+ * descriptor
1487
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algo configuration or data type combination is not currently supported on
1488
+ * given device
1489
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if algo configuration cannot be run using the selected device
1490
+ * \retval CUBLAS_STATUS_SUCCESS if check was successful
1491
+ */
1492
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCheck( //
1493
+ cublasLtHandle_t lightHandle,
1494
+ cublasLtMatmulDesc_t operationDesc,
1495
+ cublasLtMatrixLayout_t Adesc,
1496
+ cublasLtMatrixLayout_t Bdesc,
1497
+ cublasLtMatrixLayout_t Cdesc,
1498
+ cublasLtMatrixLayout_t Ddesc,
1499
+ const cublasLtMatmulAlgo_t* algo, ///< may point to result->algo
1500
+ cublasLtMatmulHeuristicResult_t* result);
1501
+
1502
+ /** Capabilities Attributes that can be retrieved from an initialized Algo structure
1503
+ */
1504
+ typedef enum {
1505
+ /** support for split K, see CUBLASLT_ALGO_CONFIG_SPLITK_NUM
1506
+ *
1507
+ * int32_t, 0 means no support, supported otherwise
1508
+ */
1509
+ CUBLASLT_ALGO_CAP_SPLITK_SUPPORT = 0,
1510
+
1511
+ /** reduction scheme mask, see cublasLtReductionScheme_t; shows supported reduction schemes, if reduction scheme is
1512
+ * not masked out it is supported.
1513
+ *
1514
+ * e.g. int isReductionSchemeComputeTypeSupported ? (reductionSchemeMask & CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ==
1515
+ * CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE ? 1 : 0;
1516
+ *
1517
+ * uint32_t
1518
+ */
1519
+ CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK = 1,
1520
+
1521
+ /** support for cta swizzling, see CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING
1522
+ *
1523
+ * uint32_t, 0 means no support, 1 means supported value of 1, other values are reserved
1524
+ */
1525
+ CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT = 2,
1526
+
1527
+ /** support strided batch
1528
+ *
1529
+ * int32_t, 0 means no support, supported otherwise
1530
+ */
1531
+ CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT = 3,
1532
+
1533
+ /** support results out of place (D != C in D = alpha.A.B + beta.C)
1534
+ *
1535
+ * int32_t, 0 means no support, supported otherwise
1536
+ */
1537
+ CUBLASLT_ALGO_CAP_OUT_OF_PLACE_RESULT_SUPPORT = 4,
1538
+
1539
+ /** syrk/herk support (on top of regular gemm)
1540
+ *
1541
+ * int32_t, 0 means no support, supported otherwise
1542
+ */
1543
+ CUBLASLT_ALGO_CAP_UPLO_SUPPORT = 5,
1544
+
1545
+ /** tile ids possible to use, see cublasLtMatmulTile_t; if no tile ids are supported use
1546
+ * CUBLASLT_MATMUL_TILE_UNDEFINED
1547
+ *
1548
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1549
+ *
1550
+ * array of uint32_t
1551
+ */
1552
+ CUBLASLT_ALGO_CAP_TILE_IDS = 6,
1553
+
1554
+ /** custom option range is from 0 to CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX (inclusive), see
1555
+ * CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION
1556
+ *
1557
+ * int32_t
1558
+ */
1559
+ CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX = 7,
1560
+
1561
+ /** whether algorithm supports custom (not COL or ROW memory order), see cublasLtOrder_t
1562
+ *
1563
+ * int32_t 0 means only COL and ROW memory order is allowed, non-zero means that algo might have different
1564
+ * requirements;
1565
+ */
1566
+ CUBLASLT_ALGO_CAP_CUSTOM_MEMORY_ORDER = 10,
1567
+
1568
+ /** bitmask enumerating pointer modes algorithm supports
1569
+ *
1570
+ * uint32_t, see cublasLtPointerModeMask_t
1571
+ */
1572
+ CUBLASLT_ALGO_CAP_POINTER_MODE_MASK = 11,
1573
+
1574
+ /** bitmask enumerating kinds of postprocessing algorithm supports in the epilogue
1575
+ *
1576
+ * uint32_t, see cublasLtEpilogue_t
1577
+ */
1578
+ CUBLASLT_ALGO_CAP_EPILOGUE_MASK = 12,
1579
+
1580
+ /** stages ids possible to use, see cublasLtMatmulStages_t; if no stages ids are supported use
1581
+ * CUBLASLT_MATMUL_STAGES_UNDEFINED
1582
+ *
1583
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1584
+ *
1585
+ * array of uint32_t
1586
+ */
1587
+ CUBLASLT_ALGO_CAP_STAGES_IDS = 13,
1588
+
1589
+ /** support for nagative ld for all of the matrices
1590
+ *
1591
+ * int32_t 0 means no support, supported otherwise
1592
+ */
1593
+ CUBLASLT_ALGO_CAP_LD_NEGATIVE = 14,
1594
+
1595
+ /** details about algorithm's implementation that affect it's numerical behavior
1596
+ *
1597
+ * uint64_t, see cublasLtNumericalImplFlags_t
1598
+ */
1599
+ CUBLASLT_ALGO_CAP_NUMERICAL_IMPL_FLAGS = 15,
1600
+
1601
+ /** minimum alignment required for A matrix in bytes
1602
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1603
+ *
1604
+ * uint32_t
1605
+ */
1606
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_A_BYTES = 16,
1607
+
1608
+ /** minimum alignment required for B matrix in bytes
1609
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1610
+ *
1611
+ * uint32_t
1612
+ */
1613
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_B_BYTES = 17,
1614
+
1615
+ /** minimum alignment required for C matrix in bytes
1616
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1617
+ *
1618
+ * uint32_t
1619
+ */
1620
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_C_BYTES = 18,
1621
+
1622
+ /** minimum alignment required for D matrix in bytes
1623
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1624
+ *
1625
+ * uint32_t
1626
+ */
1627
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_D_BYTES = 19,
1628
+ } cublasLtMatmulAlgoCapAttributes_t;
1629
+
1630
+ /** Get algo capability attribute.
1631
+ *
1632
+ * E.g. to get list of supported Tile IDs:
1633
+ * cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END];
1634
+ * size_t num_tiles, size_written;
1635
+ * if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles, sizeof(tiles), size_written) ==
1636
+ * CUBLAS_STATUS_SUCCESS) { num_tiles = size_written / sizeof(tiles[0]);
1637
+ * }
1638
+ *
1639
+ * \param[in] algo The algo descriptor
1640
+ * \param[in] attr The attribute
1641
+ * \param[out] buf memory address containing the new value
1642
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1643
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1644
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1645
+ *
1646
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1647
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1648
+ * selected attribute
1649
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1650
+ */
1651
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCapGetAttribute(const cublasLtMatmulAlgo_t* algo,
1652
+ cublasLtMatmulAlgoCapAttributes_t attr,
1653
+ void* buf,
1654
+ size_t sizeInBytes,
1655
+ size_t* sizeWritten);
1656
+
1657
+ /** Algo Configuration Attributes that can be set according to the Algo capabilities
1658
+ */
1659
+ typedef enum {
1660
+ /** algorithm index, see cublasLtMatmulAlgoGetIds()
1661
+ *
1662
+ * readonly, set by cublasLtMatmulAlgoInit()
1663
+ * int32_t
1664
+ */
1665
+ CUBLASLT_ALGO_CONFIG_ID = 0,
1666
+ /** tile id, see cublasLtMatmulTile_t
1667
+ *
1668
+ * uint32_t, default: CUBLASLT_MATMUL_TILE_UNDEFINED
1669
+ */
1670
+ CUBLASLT_ALGO_CONFIG_TILE_ID = 1,
1671
+ /** Number of K splits. If the number of K splits is greater than one, SPLITK_NUM parts
1672
+ * of matrix multiplication will be computed in parallel. The results will be accumulated
1673
+ * according to CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME
1674
+ *
1675
+ * int32_t, default: 1
1676
+ */
1677
+ CUBLASLT_ALGO_CONFIG_SPLITK_NUM = 2,
1678
+ /** reduction scheme, see cublasLtReductionScheme_t
1679
+ *
1680
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_NONE
1681
+ */
1682
+ CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME = 3,
1683
+ /** cta swizzling, change mapping from CUDA grid coordinates to parts of the matrices
1684
+ *
1685
+ * possible values: 0, 1, other values reserved
1686
+ *
1687
+ * uint32_t, default: 0
1688
+ */
1689
+ CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING = 4,
1690
+ /** custom option, each algorithm can support some custom options that don't fit description of the other config
1691
+ * attributes, see CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX to get accepted range for any specific case
1692
+ *
1693
+ * uint32_t, default: 0
1694
+ */
1695
+ CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION = 5,
1696
+ /** stages id, see cublasLtMatmulStages_t
1697
+ *
1698
+ * uint32_t, default: CUBLASLT_MATMUL_STAGES_UNDEFINED
1699
+ */
1700
+ CUBLASLT_ALGO_CONFIG_STAGES_ID = 6,
1701
+ /** inner shape id, see cublasLtMatmulInnerShape_t
1702
+ *
1703
+ * uint16_t, default: 0 (CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED)
1704
+ */
1705
+ CUBLASLT_ALGO_CONFIG_INNER_SHAPE_ID = 7,
1706
+ /** Thread Block Cluster shape id, see cublasLtClusterShape_t. Defines cluster size to use.
1707
+ *
1708
+ * uint16_t, default: 0 (CUBLASLT_CLUSTER_SHAPE_AUTO)
1709
+ */
1710
+ CUBLASLT_ALGO_CONFIG_CLUSTER_SHAPE_ID = 8,
1711
+ } cublasLtMatmulAlgoConfigAttributes_t;
1712
+
1713
+ /** Set algo configuration attribute.
1714
+ *
1715
+ * \param[in] algo The algo descriptor
1716
+ * \param[in] attr The attribute
1717
+ * \param[in] buf memory address containing the new value
1718
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1719
+ *
1720
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1721
+ * selected attribute
1722
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1723
+ */
1724
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigSetAttribute(cublasLtMatmulAlgo_t* algo,
1725
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1726
+ const void* buf,
1727
+ size_t sizeInBytes);
1728
+
1729
+ /** Get algo configuration attribute.
1730
+ *
1731
+ * \param[in] algo The algo descriptor
1732
+ * \param[in] attr The attribute
1733
+ * \param[out] buf memory address containing the new value
1734
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1735
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1736
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1737
+ *
1738
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1739
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1740
+ * selected attribute
1741
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1742
+ */
1743
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigGetAttribute(const cublasLtMatmulAlgo_t* algo,
1744
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1745
+ void* buf,
1746
+ size_t sizeInBytes,
1747
+ size_t* sizeWritten);
1748
+
1749
+ /** Experimental: Logger callback type.
1750
+ */
1751
+ typedef void (*cublasLtLoggerCallback_t)(int logLevel, const char* functionName, const char* message);
1752
+
1753
+ /** Experimental: Logger callback setter.
1754
+ *
1755
+ * \param[in] callback a user defined callback function to be called by the logger
1756
+ *
1757
+ * \retval CUBLAS_STATUS_SUCCESS if callback was set successfully
1758
+ */
1759
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetCallback(cublasLtLoggerCallback_t callback);
1760
+
1761
+ /** Experimental: Log file setter.
1762
+ *
1763
+ * \param[in] file an open file with write permissions
1764
+ *
1765
+ * \retval CUBLAS_STATUS_SUCCESS if log file was set successfully
1766
+ */
1767
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetFile(FILE* file);
1768
+
1769
+ /** Experimental: Open log file.
1770
+ *
1771
+ * \param[in] logFile log file path. if the log file does not exist, it will be created
1772
+ *
1773
+ * \retval CUBLAS_STATUS_SUCCESS if log file was created successfully
1774
+ */
1775
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerOpenFile(const char* logFile);
1776
+
1777
+ /** Experimental: Log level setter.
1778
+ *
1779
+ * \param[in] level log level, should be one of the following:
1780
+ * 0. Off
1781
+ * 1. Errors
1782
+ * 2. Performance Trace
1783
+ * 3. Performance Hints
1784
+ * 4. Heuristics Trace
1785
+ * 5. API Trace
1786
+ *
1787
+ * \retval CUBLAS_STATUS_INVALID_VALUE if log level is not one of the above levels
1788
+ *
1789
+ * \retval CUBLAS_STATUS_SUCCESS if log level was set successfully
1790
+ */
1791
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetLevel(int level);
1792
+
1793
+ /** Experimental: Log mask setter.
1794
+ *
1795
+ * \param[in] mask log mask, should be a combination of the following masks:
1796
+ * 0. Off
1797
+ * 1. Errors
1798
+ * 2. Performance Trace
1799
+ * 4. Performance Hints
1800
+ * 8. Heuristics Trace
1801
+ * 16. API Trace
1802
+ *
1803
+ * \retval CUBLAS_STATUS_SUCCESS if log mask was set successfully
1804
+ */
1805
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetMask(int mask);
1806
+
1807
+ /** Experimental: Disable logging for the entire session.
1808
+ *
1809
+ * \retval CUBLAS_STATUS_SUCCESS if disabled logging
1810
+ */
1811
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerForceDisable();
1812
+
1813
+ #if defined(__cplusplus)
1814
+ }
1815
+ #endif /* __cplusplus */
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h ADDED
@@ -0,0 +1,693 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cublasXt : Host API, Out of Core and Multi-GPU BLAS Library
51
+
52
+ */
53
+
54
+ #if !defined(CUBLAS_XT_H_)
55
+ #define CUBLAS_XT_H_
56
+
57
+ #include "driver_types.h"
58
+ #include "cuComplex.h" /* import complex data type */
59
+
60
+ #include "cublas_v2.h"
61
+
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif /* __cplusplus */
65
+
66
+ struct cublasXtContext;
67
+ typedef struct cublasXtContext* cublasXtHandle_t;
68
+
69
+ cublasStatus_t CUBLASWINAPI cublasXtCreate(cublasXtHandle_t* handle);
70
+ cublasStatus_t CUBLASWINAPI cublasXtDestroy(cublasXtHandle_t handle);
71
+ cublasStatus_t CUBLASWINAPI cublasXtGetNumBoards(int nbDevices, int deviceId[], int* nbBoards);
72
+ cublasStatus_t CUBLASWINAPI cublasXtMaxBoards(int* nbGpuBoards);
73
+ /* This routine selects the Gpus that the user want to use for CUBLAS-XT */
74
+ cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
75
+
76
+ /* This routine allows to change the dimension of the tiles ( blockDim x blockDim ) */
77
+ cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
78
+ cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
79
+
80
+ typedef enum { CUBLASXT_PINNING_DISABLED = 0, CUBLASXT_PINNING_ENABLED = 1 } cublasXtPinnedMemMode_t;
81
+ /* This routine allows to CUBLAS-XT to pin the Host memory if it find out that some of the matrix passed
82
+ are not pinned : Pinning/Unpinning the Host memory is still a costly operation
83
+ It is better if the user controls the memory on its own (by pinning/unpinning oly when necessary)
84
+ */
85
+ cublasStatus_t CUBLASWINAPI cublasXtGetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t* mode);
86
+ cublasStatus_t CUBLASWINAPI cublasXtSetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t mode);
87
+
88
+ /* This routines is to provide a CPU Blas routines, used for too small sizes or hybrid computation */
89
+ typedef enum {
90
+ CUBLASXT_FLOAT = 0,
91
+ CUBLASXT_DOUBLE = 1,
92
+ CUBLASXT_COMPLEX = 2,
93
+ CUBLASXT_DOUBLECOMPLEX = 3,
94
+ } cublasXtOpType_t;
95
+
96
+ typedef enum {
97
+ CUBLASXT_GEMM = 0,
98
+ CUBLASXT_SYRK = 1,
99
+ CUBLASXT_HERK = 2,
100
+ CUBLASXT_SYMM = 3,
101
+ CUBLASXT_HEMM = 4,
102
+ CUBLASXT_TRSM = 5,
103
+ CUBLASXT_SYR2K = 6,
104
+ CUBLASXT_HER2K = 7,
105
+
106
+ CUBLASXT_SPMM = 8,
107
+ CUBLASXT_SYRKX = 9,
108
+ CUBLASXT_HERKX = 10,
109
+ CUBLASXT_TRMM = 11,
110
+ CUBLASXT_ROUTINE_MAX = 12,
111
+ } cublasXtBlasOp_t;
112
+
113
+ /* Currently only 32-bit integer BLAS routines are supported */
114
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRoutine(cublasXtHandle_t handle,
115
+ cublasXtBlasOp_t blasOp,
116
+ cublasXtOpType_t type,
117
+ void* blasFunctor);
118
+
119
+ /* Specified the percentage of work that should done by the CPU, default is 0 (no work) */
120
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRatio(cublasXtHandle_t handle,
121
+ cublasXtBlasOp_t blasOp,
122
+ cublasXtOpType_t type,
123
+ float ratio);
124
+
125
+ /* GEMM */
126
+ cublasStatus_t CUBLASWINAPI cublasXtSgemm(cublasXtHandle_t handle,
127
+ cublasOperation_t transa,
128
+ cublasOperation_t transb,
129
+ size_t m,
130
+ size_t n,
131
+ size_t k,
132
+ const float* alpha,
133
+ const float* A,
134
+ size_t lda,
135
+ const float* B,
136
+ size_t ldb,
137
+ const float* beta,
138
+ float* C,
139
+ size_t ldc);
140
+
141
+ cublasStatus_t CUBLASWINAPI cublasXtDgemm(cublasXtHandle_t handle,
142
+ cublasOperation_t transa,
143
+ cublasOperation_t transb,
144
+ size_t m,
145
+ size_t n,
146
+ size_t k,
147
+ const double* alpha,
148
+ const double* A,
149
+ size_t lda,
150
+ const double* B,
151
+ size_t ldb,
152
+ const double* beta,
153
+ double* C,
154
+ size_t ldc);
155
+
156
+ cublasStatus_t CUBLASWINAPI cublasXtCgemm(cublasXtHandle_t handle,
157
+ cublasOperation_t transa,
158
+ cublasOperation_t transb,
159
+ size_t m,
160
+ size_t n,
161
+ size_t k,
162
+ const cuComplex* alpha,
163
+ const cuComplex* A,
164
+ size_t lda,
165
+ const cuComplex* B,
166
+ size_t ldb,
167
+ const cuComplex* beta,
168
+ cuComplex* C,
169
+ size_t ldc);
170
+
171
+ cublasStatus_t CUBLASWINAPI cublasXtZgemm(cublasXtHandle_t handle,
172
+ cublasOperation_t transa,
173
+ cublasOperation_t transb,
174
+ size_t m,
175
+ size_t n,
176
+ size_t k,
177
+ const cuDoubleComplex* alpha,
178
+ const cuDoubleComplex* A,
179
+ size_t lda,
180
+ const cuDoubleComplex* B,
181
+ size_t ldb,
182
+ const cuDoubleComplex* beta,
183
+ cuDoubleComplex* C,
184
+ size_t ldc);
185
+ /* ------------------------------------------------------- */
186
+ /* SYRK */
187
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrk(cublasXtHandle_t handle,
188
+ cublasFillMode_t uplo,
189
+ cublasOperation_t trans,
190
+ size_t n,
191
+ size_t k,
192
+ const float* alpha,
193
+ const float* A,
194
+ size_t lda,
195
+ const float* beta,
196
+ float* C,
197
+ size_t ldc);
198
+
199
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrk(cublasXtHandle_t handle,
200
+ cublasFillMode_t uplo,
201
+ cublasOperation_t trans,
202
+ size_t n,
203
+ size_t k,
204
+ const double* alpha,
205
+ const double* A,
206
+ size_t lda,
207
+ const double* beta,
208
+ double* C,
209
+ size_t ldc);
210
+
211
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrk(cublasXtHandle_t handle,
212
+ cublasFillMode_t uplo,
213
+ cublasOperation_t trans,
214
+ size_t n,
215
+ size_t k,
216
+ const cuComplex* alpha,
217
+ const cuComplex* A,
218
+ size_t lda,
219
+ const cuComplex* beta,
220
+ cuComplex* C,
221
+ size_t ldc);
222
+
223
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrk(cublasXtHandle_t handle,
224
+ cublasFillMode_t uplo,
225
+ cublasOperation_t trans,
226
+ size_t n,
227
+ size_t k,
228
+ const cuDoubleComplex* alpha,
229
+ const cuDoubleComplex* A,
230
+ size_t lda,
231
+ const cuDoubleComplex* beta,
232
+ cuDoubleComplex* C,
233
+ size_t ldc);
234
+ /* -------------------------------------------------------------------- */
235
+ /* HERK */
236
+ cublasStatus_t CUBLASWINAPI cublasXtCherk(cublasXtHandle_t handle,
237
+ cublasFillMode_t uplo,
238
+ cublasOperation_t trans,
239
+ size_t n,
240
+ size_t k,
241
+ const float* alpha,
242
+ const cuComplex* A,
243
+ size_t lda,
244
+ const float* beta,
245
+ cuComplex* C,
246
+ size_t ldc);
247
+
248
+ cublasStatus_t CUBLASWINAPI cublasXtZherk(cublasXtHandle_t handle,
249
+ cublasFillMode_t uplo,
250
+ cublasOperation_t trans,
251
+ size_t n,
252
+ size_t k,
253
+ const double* alpha,
254
+ const cuDoubleComplex* A,
255
+ size_t lda,
256
+ const double* beta,
257
+ cuDoubleComplex* C,
258
+ size_t ldc);
259
+ /* -------------------------------------------------------------------- */
260
+ /* SYR2K */
261
+ cublasStatus_t CUBLASWINAPI cublasXtSsyr2k(cublasXtHandle_t handle,
262
+ cublasFillMode_t uplo,
263
+ cublasOperation_t trans,
264
+ size_t n,
265
+ size_t k,
266
+ const float* alpha,
267
+ const float* A,
268
+ size_t lda,
269
+ const float* B,
270
+ size_t ldb,
271
+ const float* beta,
272
+ float* C,
273
+ size_t ldc);
274
+
275
+ cublasStatus_t CUBLASWINAPI cublasXtDsyr2k(cublasXtHandle_t handle,
276
+ cublasFillMode_t uplo,
277
+ cublasOperation_t trans,
278
+ size_t n,
279
+ size_t k,
280
+ const double* alpha,
281
+ const double* A,
282
+ size_t lda,
283
+ const double* B,
284
+ size_t ldb,
285
+ const double* beta,
286
+ double* C,
287
+ size_t ldc);
288
+
289
+ cublasStatus_t CUBLASWINAPI cublasXtCsyr2k(cublasXtHandle_t handle,
290
+ cublasFillMode_t uplo,
291
+ cublasOperation_t trans,
292
+ size_t n,
293
+ size_t k,
294
+ const cuComplex* alpha,
295
+ const cuComplex* A,
296
+ size_t lda,
297
+ const cuComplex* B,
298
+ size_t ldb,
299
+ const cuComplex* beta,
300
+ cuComplex* C,
301
+ size_t ldc);
302
+
303
+ cublasStatus_t CUBLASWINAPI cublasXtZsyr2k(cublasXtHandle_t handle,
304
+ cublasFillMode_t uplo,
305
+ cublasOperation_t trans,
306
+ size_t n,
307
+ size_t k,
308
+ const cuDoubleComplex* alpha,
309
+ const cuDoubleComplex* A,
310
+ size_t lda,
311
+ const cuDoubleComplex* B,
312
+ size_t ldb,
313
+ const cuDoubleComplex* beta,
314
+ cuDoubleComplex* C,
315
+ size_t ldc);
316
+ /* -------------------------------------------------------------------- */
317
+ /* HERKX : variant extension of HERK */
318
+ cublasStatus_t CUBLASWINAPI cublasXtCherkx(cublasXtHandle_t handle,
319
+ cublasFillMode_t uplo,
320
+ cublasOperation_t trans,
321
+ size_t n,
322
+ size_t k,
323
+ const cuComplex* alpha,
324
+ const cuComplex* A,
325
+ size_t lda,
326
+ const cuComplex* B,
327
+ size_t ldb,
328
+ const float* beta,
329
+ cuComplex* C,
330
+ size_t ldc);
331
+
332
+ cublasStatus_t CUBLASWINAPI cublasXtZherkx(cublasXtHandle_t handle,
333
+ cublasFillMode_t uplo,
334
+ cublasOperation_t trans,
335
+ size_t n,
336
+ size_t k,
337
+ const cuDoubleComplex* alpha,
338
+ const cuDoubleComplex* A,
339
+ size_t lda,
340
+ const cuDoubleComplex* B,
341
+ size_t ldb,
342
+ const double* beta,
343
+ cuDoubleComplex* C,
344
+ size_t ldc);
345
+
346
+ /* -------------------------------------------------------------------- */
347
+ /* TRSM */
348
+ cublasStatus_t CUBLASWINAPI cublasXtStrsm(cublasXtHandle_t handle,
349
+ cublasSideMode_t side,
350
+ cublasFillMode_t uplo,
351
+ cublasOperation_t trans,
352
+ cublasDiagType_t diag,
353
+ size_t m,
354
+ size_t n,
355
+ const float* alpha,
356
+ const float* A,
357
+ size_t lda,
358
+ float* B,
359
+ size_t ldb);
360
+
361
+ cublasStatus_t CUBLASWINAPI cublasXtDtrsm(cublasXtHandle_t handle,
362
+ cublasSideMode_t side,
363
+ cublasFillMode_t uplo,
364
+ cublasOperation_t trans,
365
+ cublasDiagType_t diag,
366
+ size_t m,
367
+ size_t n,
368
+ const double* alpha,
369
+ const double* A,
370
+ size_t lda,
371
+ double* B,
372
+ size_t ldb);
373
+
374
+ cublasStatus_t CUBLASWINAPI cublasXtCtrsm(cublasXtHandle_t handle,
375
+ cublasSideMode_t side,
376
+ cublasFillMode_t uplo,
377
+ cublasOperation_t trans,
378
+ cublasDiagType_t diag,
379
+ size_t m,
380
+ size_t n,
381
+ const cuComplex* alpha,
382
+ const cuComplex* A,
383
+ size_t lda,
384
+ cuComplex* B,
385
+ size_t ldb);
386
+
387
+ cublasStatus_t CUBLASWINAPI cublasXtZtrsm(cublasXtHandle_t handle,
388
+ cublasSideMode_t side,
389
+ cublasFillMode_t uplo,
390
+ cublasOperation_t trans,
391
+ cublasDiagType_t diag,
392
+ size_t m,
393
+ size_t n,
394
+ const cuDoubleComplex* alpha,
395
+ const cuDoubleComplex* A,
396
+ size_t lda,
397
+ cuDoubleComplex* B,
398
+ size_t ldb);
399
+ /* -------------------------------------------------------------------- */
400
+ /* SYMM : Symmetric Multiply Matrix*/
401
+ cublasStatus_t CUBLASWINAPI cublasXtSsymm(cublasXtHandle_t handle,
402
+ cublasSideMode_t side,
403
+ cublasFillMode_t uplo,
404
+ size_t m,
405
+ size_t n,
406
+ const float* alpha,
407
+ const float* A,
408
+ size_t lda,
409
+ const float* B,
410
+ size_t ldb,
411
+ const float* beta,
412
+ float* C,
413
+ size_t ldc);
414
+
415
+ cublasStatus_t CUBLASWINAPI cublasXtDsymm(cublasXtHandle_t handle,
416
+ cublasSideMode_t side,
417
+ cublasFillMode_t uplo,
418
+ size_t m,
419
+ size_t n,
420
+ const double* alpha,
421
+ const double* A,
422
+ size_t lda,
423
+ const double* B,
424
+ size_t ldb,
425
+ const double* beta,
426
+ double* C,
427
+ size_t ldc);
428
+
429
+ cublasStatus_t CUBLASWINAPI cublasXtCsymm(cublasXtHandle_t handle,
430
+ cublasSideMode_t side,
431
+ cublasFillMode_t uplo,
432
+ size_t m,
433
+ size_t n,
434
+ const cuComplex* alpha,
435
+ const cuComplex* A,
436
+ size_t lda,
437
+ const cuComplex* B,
438
+ size_t ldb,
439
+ const cuComplex* beta,
440
+ cuComplex* C,
441
+ size_t ldc);
442
+
443
+ cublasStatus_t CUBLASWINAPI cublasXtZsymm(cublasXtHandle_t handle,
444
+ cublasSideMode_t side,
445
+ cublasFillMode_t uplo,
446
+ size_t m,
447
+ size_t n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* A,
450
+ size_t lda,
451
+ const cuDoubleComplex* B,
452
+ size_t ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* C,
455
+ size_t ldc);
456
+ /* -------------------------------------------------------------------- */
457
+ /* HEMM : Hermitian Matrix Multiply */
458
+ cublasStatus_t CUBLASWINAPI cublasXtChemm(cublasXtHandle_t handle,
459
+ cublasSideMode_t side,
460
+ cublasFillMode_t uplo,
461
+ size_t m,
462
+ size_t n,
463
+ const cuComplex* alpha,
464
+ const cuComplex* A,
465
+ size_t lda,
466
+ const cuComplex* B,
467
+ size_t ldb,
468
+ const cuComplex* beta,
469
+ cuComplex* C,
470
+ size_t ldc);
471
+
472
+ cublasStatus_t CUBLASWINAPI cublasXtZhemm(cublasXtHandle_t handle,
473
+ cublasSideMode_t side,
474
+ cublasFillMode_t uplo,
475
+ size_t m,
476
+ size_t n,
477
+ const cuDoubleComplex* alpha,
478
+ const cuDoubleComplex* A,
479
+ size_t lda,
480
+ const cuDoubleComplex* B,
481
+ size_t ldb,
482
+ const cuDoubleComplex* beta,
483
+ cuDoubleComplex* C,
484
+ size_t ldc);
485
+
486
+ /* -------------------------------------------------------------------- */
487
+ /* SYRKX : variant extension of SYRK */
488
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrkx(cublasXtHandle_t handle,
489
+ cublasFillMode_t uplo,
490
+ cublasOperation_t trans,
491
+ size_t n,
492
+ size_t k,
493
+ const float* alpha,
494
+ const float* A,
495
+ size_t lda,
496
+ const float* B,
497
+ size_t ldb,
498
+ const float* beta,
499
+ float* C,
500
+ size_t ldc);
501
+
502
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrkx(cublasXtHandle_t handle,
503
+ cublasFillMode_t uplo,
504
+ cublasOperation_t trans,
505
+ size_t n,
506
+ size_t k,
507
+ const double* alpha,
508
+ const double* A,
509
+ size_t lda,
510
+ const double* B,
511
+ size_t ldb,
512
+ const double* beta,
513
+ double* C,
514
+ size_t ldc);
515
+
516
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrkx(cublasXtHandle_t handle,
517
+ cublasFillMode_t uplo,
518
+ cublasOperation_t trans,
519
+ size_t n,
520
+ size_t k,
521
+ const cuComplex* alpha,
522
+ const cuComplex* A,
523
+ size_t lda,
524
+ const cuComplex* B,
525
+ size_t ldb,
526
+ const cuComplex* beta,
527
+ cuComplex* C,
528
+ size_t ldc);
529
+
530
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrkx(cublasXtHandle_t handle,
531
+ cublasFillMode_t uplo,
532
+ cublasOperation_t trans,
533
+ size_t n,
534
+ size_t k,
535
+ const cuDoubleComplex* alpha,
536
+ const cuDoubleComplex* A,
537
+ size_t lda,
538
+ const cuDoubleComplex* B,
539
+ size_t ldb,
540
+ const cuDoubleComplex* beta,
541
+ cuDoubleComplex* C,
542
+ size_t ldc);
543
+ /* -------------------------------------------------------------------- */
544
+ /* HER2K : variant extension of HERK */
545
+ cublasStatus_t CUBLASWINAPI cublasXtCher2k(cublasXtHandle_t handle,
546
+ cublasFillMode_t uplo,
547
+ cublasOperation_t trans,
548
+ size_t n,
549
+ size_t k,
550
+ const cuComplex* alpha,
551
+ const cuComplex* A,
552
+ size_t lda,
553
+ const cuComplex* B,
554
+ size_t ldb,
555
+ const float* beta,
556
+ cuComplex* C,
557
+ size_t ldc);
558
+
559
+ cublasStatus_t CUBLASWINAPI cublasXtZher2k(cublasXtHandle_t handle,
560
+ cublasFillMode_t uplo,
561
+ cublasOperation_t trans,
562
+ size_t n,
563
+ size_t k,
564
+ const cuDoubleComplex* alpha,
565
+ const cuDoubleComplex* A,
566
+ size_t lda,
567
+ const cuDoubleComplex* B,
568
+ size_t ldb,
569
+ const double* beta,
570
+ cuDoubleComplex* C,
571
+ size_t ldc);
572
+
573
+ /* -------------------------------------------------------------------- */
574
+ /* SPMM : Symmetric Packed Multiply Matrix*/
575
+ cublasStatus_t CUBLASWINAPI cublasXtSspmm(cublasXtHandle_t handle,
576
+ cublasSideMode_t side,
577
+ cublasFillMode_t uplo,
578
+ size_t m,
579
+ size_t n,
580
+ const float* alpha,
581
+ const float* AP,
582
+ const float* B,
583
+ size_t ldb,
584
+ const float* beta,
585
+ float* C,
586
+ size_t ldc);
587
+
588
+ cublasStatus_t CUBLASWINAPI cublasXtDspmm(cublasXtHandle_t handle,
589
+ cublasSideMode_t side,
590
+ cublasFillMode_t uplo,
591
+ size_t m,
592
+ size_t n,
593
+ const double* alpha,
594
+ const double* AP,
595
+ const double* B,
596
+ size_t ldb,
597
+ const double* beta,
598
+ double* C,
599
+ size_t ldc);
600
+
601
+ cublasStatus_t CUBLASWINAPI cublasXtCspmm(cublasXtHandle_t handle,
602
+ cublasSideMode_t side,
603
+ cublasFillMode_t uplo,
604
+ size_t m,
605
+ size_t n,
606
+ const cuComplex* alpha,
607
+ const cuComplex* AP,
608
+ const cuComplex* B,
609
+ size_t ldb,
610
+ const cuComplex* beta,
611
+ cuComplex* C,
612
+ size_t ldc);
613
+
614
+ cublasStatus_t CUBLASWINAPI cublasXtZspmm(cublasXtHandle_t handle,
615
+ cublasSideMode_t side,
616
+ cublasFillMode_t uplo,
617
+ size_t m,
618
+ size_t n,
619
+ const cuDoubleComplex* alpha,
620
+ const cuDoubleComplex* AP,
621
+ const cuDoubleComplex* B,
622
+ size_t ldb,
623
+ const cuDoubleComplex* beta,
624
+ cuDoubleComplex* C,
625
+ size_t ldc);
626
+
627
+ /* -------------------------------------------------------------------- */
628
+ /* TRMM */
629
+ cublasStatus_t CUBLASWINAPI cublasXtStrmm(cublasXtHandle_t handle,
630
+ cublasSideMode_t side,
631
+ cublasFillMode_t uplo,
632
+ cublasOperation_t trans,
633
+ cublasDiagType_t diag,
634
+ size_t m,
635
+ size_t n,
636
+ const float* alpha,
637
+ const float* A,
638
+ size_t lda,
639
+ const float* B,
640
+ size_t ldb,
641
+ float* C,
642
+ size_t ldc);
643
+
644
+ cublasStatus_t CUBLASWINAPI cublasXtDtrmm(cublasXtHandle_t handle,
645
+ cublasSideMode_t side,
646
+ cublasFillMode_t uplo,
647
+ cublasOperation_t trans,
648
+ cublasDiagType_t diag,
649
+ size_t m,
650
+ size_t n,
651
+ const double* alpha,
652
+ const double* A,
653
+ size_t lda,
654
+ const double* B,
655
+ size_t ldb,
656
+ double* C,
657
+ size_t ldc);
658
+
659
+ cublasStatus_t CUBLASWINAPI cublasXtCtrmm(cublasXtHandle_t handle,
660
+ cublasSideMode_t side,
661
+ cublasFillMode_t uplo,
662
+ cublasOperation_t trans,
663
+ cublasDiagType_t diag,
664
+ size_t m,
665
+ size_t n,
666
+ const cuComplex* alpha,
667
+ const cuComplex* A,
668
+ size_t lda,
669
+ const cuComplex* B,
670
+ size_t ldb,
671
+ cuComplex* C,
672
+ size_t ldc);
673
+
674
+ cublasStatus_t CUBLASWINAPI cublasXtZtrmm(cublasXtHandle_t handle,
675
+ cublasSideMode_t side,
676
+ cublasFillMode_t uplo,
677
+ cublasOperation_t trans,
678
+ cublasDiagType_t diag,
679
+ size_t m,
680
+ size_t n,
681
+ const cuDoubleComplex* alpha,
682
+ const cuDoubleComplex* A,
683
+ size_t lda,
684
+ const cuDoubleComplex* B,
685
+ size_t ldb,
686
+ cuDoubleComplex* C,
687
+ size_t ldc);
688
+
689
+ #if defined(__cplusplus)
690
+ }
691
+ #endif /* __cplusplus */
692
+
693
+ #endif /* !defined(CUBLAS_XT_H_) */
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the new CUBLAS library API, it mapped the generic
52
+ * Cublas name functions to the actual _v2 implementations.
53
+ */
54
+
55
+ #if !defined(CUBLAS_V2_H_)
56
+ #define CUBLAS_V2_H_
57
+
58
+ #if defined(CUBLAS_H_)
59
+ #error "It is an error to include both cublas.h and cublas_v2.h"
60
+ #endif
61
+
62
+ #undef CUBLASAPI
63
+ #ifdef __CUDACC__
64
+ #define CUBLASAPI __host__ __device__
65
+ #else
66
+ #define CUBLASAPI
67
+ #endif
68
+
69
+ #include "cublas_api.h"
70
+
71
+ #define cublasCreate cublasCreate_v2
72
+ #define cublasDestroy cublasDestroy_v2
73
+ #define cublasGetVersion cublasGetVersion_v2
74
+ #define cublasSetWorkspace cublasSetWorkspace_v2
75
+ #define cublasSetStream cublasSetStream_v2
76
+ #define cublasGetStream cublasGetStream_v2
77
+ #define cublasGetPointerMode cublasGetPointerMode_v2
78
+ #define cublasSetPointerMode cublasSetPointerMode_v2
79
+
80
+ /* 32-bit integer */
81
+
82
+ /* Blas1 Routines */
83
+
84
+ #define cublasSnrm2 cublasSnrm2_v2
85
+ #define cublasDnrm2 cublasDnrm2_v2
86
+ #define cublasScnrm2 cublasScnrm2_v2
87
+ #define cublasDznrm2 cublasDznrm2_v2
88
+
89
+ #define cublasSdot cublasSdot_v2
90
+ #define cublasDdot cublasDdot_v2
91
+ #define cublasCdotu cublasCdotu_v2
92
+ #define cublasCdotc cublasCdotc_v2
93
+ #define cublasZdotu cublasZdotu_v2
94
+ #define cublasZdotc cublasZdotc_v2
95
+
96
+ #define cublasSscal cublasSscal_v2
97
+ #define cublasDscal cublasDscal_v2
98
+ #define cublasCscal cublasCscal_v2
99
+ #define cublasCsscal cublasCsscal_v2
100
+ #define cublasZscal cublasZscal_v2
101
+ #define cublasZdscal cublasZdscal_v2
102
+
103
+ #define cublasSaxpy cublasSaxpy_v2
104
+ #define cublasDaxpy cublasDaxpy_v2
105
+ #define cublasCaxpy cublasCaxpy_v2
106
+ #define cublasZaxpy cublasZaxpy_v2
107
+
108
+ #define cublasScopy cublasScopy_v2
109
+ #define cublasDcopy cublasDcopy_v2
110
+ #define cublasCcopy cublasCcopy_v2
111
+ #define cublasZcopy cublasZcopy_v2
112
+
113
+ #define cublasSswap cublasSswap_v2
114
+ #define cublasDswap cublasDswap_v2
115
+ #define cublasCswap cublasCswap_v2
116
+ #define cublasZswap cublasZswap_v2
117
+
118
+ #define cublasIsamax cublasIsamax_v2
119
+ #define cublasIdamax cublasIdamax_v2
120
+ #define cublasIcamax cublasIcamax_v2
121
+ #define cublasIzamax cublasIzamax_v2
122
+
123
+ #define cublasIsamin cublasIsamin_v2
124
+ #define cublasIdamin cublasIdamin_v2
125
+ #define cublasIcamin cublasIcamin_v2
126
+ #define cublasIzamin cublasIzamin_v2
127
+
128
+ #define cublasSasum cublasSasum_v2
129
+ #define cublasDasum cublasDasum_v2
130
+ #define cublasScasum cublasScasum_v2
131
+ #define cublasDzasum cublasDzasum_v2
132
+
133
+ #define cublasSrot cublasSrot_v2
134
+ #define cublasDrot cublasDrot_v2
135
+ #define cublasCrot cublasCrot_v2
136
+ #define cublasCsrot cublasCsrot_v2
137
+ #define cublasZrot cublasZrot_v2
138
+ #define cublasZdrot cublasZdrot_v2
139
+
140
+ #define cublasSrotg cublasSrotg_v2
141
+ #define cublasDrotg cublasDrotg_v2
142
+ #define cublasCrotg cublasCrotg_v2
143
+ #define cublasZrotg cublasZrotg_v2
144
+
145
+ #define cublasSrotm cublasSrotm_v2
146
+ #define cublasDrotm cublasDrotm_v2
147
+
148
+ #define cublasSrotmg cublasSrotmg_v2
149
+ #define cublasDrotmg cublasDrotmg_v2
150
+
151
+ /* Blas2 Routines */
152
+
153
+ #define cublasSgemv cublasSgemv_v2
154
+ #define cublasDgemv cublasDgemv_v2
155
+ #define cublasCgemv cublasCgemv_v2
156
+ #define cublasZgemv cublasZgemv_v2
157
+
158
+ #define cublasSgbmv cublasSgbmv_v2
159
+ #define cublasDgbmv cublasDgbmv_v2
160
+ #define cublasCgbmv cublasCgbmv_v2
161
+ #define cublasZgbmv cublasZgbmv_v2
162
+
163
+ #define cublasStrmv cublasStrmv_v2
164
+ #define cublasDtrmv cublasDtrmv_v2
165
+ #define cublasCtrmv cublasCtrmv_v2
166
+ #define cublasZtrmv cublasZtrmv_v2
167
+
168
+ #define cublasStbmv cublasStbmv_v2
169
+ #define cublasDtbmv cublasDtbmv_v2
170
+ #define cublasCtbmv cublasCtbmv_v2
171
+ #define cublasZtbmv cublasZtbmv_v2
172
+
173
+ #define cublasStpmv cublasStpmv_v2
174
+ #define cublasDtpmv cublasDtpmv_v2
175
+ #define cublasCtpmv cublasCtpmv_v2
176
+ #define cublasZtpmv cublasZtpmv_v2
177
+
178
+ #define cublasStrsv cublasStrsv_v2
179
+ #define cublasDtrsv cublasDtrsv_v2
180
+ #define cublasCtrsv cublasCtrsv_v2
181
+ #define cublasZtrsv cublasZtrsv_v2
182
+
183
+ #define cublasStpsv cublasStpsv_v2
184
+ #define cublasDtpsv cublasDtpsv_v2
185
+ #define cublasCtpsv cublasCtpsv_v2
186
+ #define cublasZtpsv cublasZtpsv_v2
187
+
188
+ #define cublasStbsv cublasStbsv_v2
189
+ #define cublasDtbsv cublasDtbsv_v2
190
+ #define cublasCtbsv cublasCtbsv_v2
191
+ #define cublasZtbsv cublasZtbsv_v2
192
+
193
+ #define cublasSsymv cublasSsymv_v2
194
+ #define cublasDsymv cublasDsymv_v2
195
+ #define cublasCsymv cublasCsymv_v2
196
+ #define cublasZsymv cublasZsymv_v2
197
+ #define cublasChemv cublasChemv_v2
198
+ #define cublasZhemv cublasZhemv_v2
199
+
200
+ #define cublasSsbmv cublasSsbmv_v2
201
+ #define cublasDsbmv cublasDsbmv_v2
202
+ #define cublasChbmv cublasChbmv_v2
203
+ #define cublasZhbmv cublasZhbmv_v2
204
+
205
+ #define cublasSspmv cublasSspmv_v2
206
+ #define cublasDspmv cublasDspmv_v2
207
+ #define cublasChpmv cublasChpmv_v2
208
+ #define cublasZhpmv cublasZhpmv_v2
209
+
210
+ #define cublasSger cublasSger_v2
211
+ #define cublasDger cublasDger_v2
212
+ #define cublasCgeru cublasCgeru_v2
213
+ #define cublasCgerc cublasCgerc_v2
214
+ #define cublasZgeru cublasZgeru_v2
215
+ #define cublasZgerc cublasZgerc_v2
216
+
217
+ #define cublasSsyr cublasSsyr_v2
218
+ #define cublasDsyr cublasDsyr_v2
219
+ #define cublasCsyr cublasCsyr_v2
220
+ #define cublasZsyr cublasZsyr_v2
221
+ #define cublasCher cublasCher_v2
222
+ #define cublasZher cublasZher_v2
223
+
224
+ #define cublasSspr cublasSspr_v2
225
+ #define cublasDspr cublasDspr_v2
226
+ #define cublasChpr cublasChpr_v2
227
+ #define cublasZhpr cublasZhpr_v2
228
+
229
+ #define cublasSsyr2 cublasSsyr2_v2
230
+ #define cublasDsyr2 cublasDsyr2_v2
231
+ #define cublasCsyr2 cublasCsyr2_v2
232
+ #define cublasZsyr2 cublasZsyr2_v2
233
+ #define cublasCher2 cublasCher2_v2
234
+ #define cublasZher2 cublasZher2_v2
235
+
236
+ #define cublasSspr2 cublasSspr2_v2
237
+ #define cublasDspr2 cublasDspr2_v2
238
+ #define cublasChpr2 cublasChpr2_v2
239
+ #define cublasZhpr2 cublasZhpr2_v2
240
+
241
+ /* Blas3 Routines */
242
+
243
+ #define cublasSgemm cublasSgemm_v2
244
+ #define cublasDgemm cublasDgemm_v2
245
+ #define cublasCgemm cublasCgemm_v2
246
+ #define cublasZgemm cublasZgemm_v2
247
+
248
+ #define cublasSsyrk cublasSsyrk_v2
249
+ #define cublasDsyrk cublasDsyrk_v2
250
+ #define cublasCsyrk cublasCsyrk_v2
251
+ #define cublasZsyrk cublasZsyrk_v2
252
+ #define cublasCherk cublasCherk_v2
253
+ #define cublasZherk cublasZherk_v2
254
+
255
+ #define cublasSsyr2k cublasSsyr2k_v2
256
+ #define cublasDsyr2k cublasDsyr2k_v2
257
+ #define cublasCsyr2k cublasCsyr2k_v2
258
+ #define cublasZsyr2k cublasZsyr2k_v2
259
+ #define cublasCher2k cublasCher2k_v2
260
+ #define cublasZher2k cublasZher2k_v2
261
+
262
+ #define cublasSsymm cublasSsymm_v2
263
+ #define cublasDsymm cublasDsymm_v2
264
+ #define cublasCsymm cublasCsymm_v2
265
+ #define cublasZsymm cublasZsymm_v2
266
+ #define cublasChemm cublasChemm_v2
267
+ #define cublasZhemm cublasZhemm_v2
268
+
269
+ #define cublasStrsm cublasStrsm_v2
270
+ #define cublasDtrsm cublasDtrsm_v2
271
+ #define cublasCtrsm cublasCtrsm_v2
272
+ #define cublasZtrsm cublasZtrsm_v2
273
+
274
+ #define cublasStrmm cublasStrmm_v2
275
+ #define cublasDtrmm cublasDtrmm_v2
276
+ #define cublasCtrmm cublasCtrmm_v2
277
+ #define cublasZtrmm cublasZtrmm_v2
278
+
279
+ /* 64-bit integer */
280
+
281
+ /* Blas1 Routines */
282
+
283
+ #define cublasSnrm2_64 cublasSnrm2_v2_64
284
+ #define cublasDnrm2_64 cublasDnrm2_v2_64
285
+ #define cublasScnrm2_64 cublasScnrm2_v2_64
286
+ #define cublasDznrm2_64 cublasDznrm2_v2_64
287
+
288
+ #define cublasSdot_64 cublasSdot_v2_64
289
+ #define cublasDdot_64 cublasDdot_v2_64
290
+ #define cublasCdotu_64 cublasCdotu_v2_64
291
+ #define cublasCdotc_64 cublasCdotc_v2_64
292
+ #define cublasZdotu_64 cublasZdotu_v2_64
293
+ #define cublasZdotc_64 cublasZdotc_v2_64
294
+
295
+ #define cublasSscal_64 cublasSscal_v2_64
296
+ #define cublasDscal_64 cublasDscal_v2_64
297
+ #define cublasCscal_64 cublasCscal_v2_64
298
+ #define cublasCsscal_64 cublasCsscal_v2_64
299
+ #define cublasZscal_64 cublasZscal_v2_64
300
+ #define cublasZdscal_64 cublasZdscal_v2_64
301
+
302
+ #define cublasSaxpy_64 cublasSaxpy_v2_64
303
+ #define cublasDaxpy_64 cublasDaxpy_v2_64
304
+ #define cublasCaxpy_64 cublasCaxpy_v2_64
305
+ #define cublasZaxpy_64 cublasZaxpy_v2_64
306
+
307
+ #define cublasScopy_64 cublasScopy_v2_64
308
+ #define cublasDcopy_64 cublasDcopy_v2_64
309
+ #define cublasCcopy_64 cublasCcopy_v2_64
310
+ #define cublasZcopy_64 cublasZcopy_v2_64
311
+
312
+ #define cublasSswap_64 cublasSswap_v2_64
313
+ #define cublasDswap_64 cublasDswap_v2_64
314
+ #define cublasCswap_64 cublasCswap_v2_64
315
+ #define cublasZswap_64 cublasZswap_v2_64
316
+
317
+ #define cublasIsamax_64 cublasIsamax_v2_64
318
+ #define cublasIdamax_64 cublasIdamax_v2_64
319
+ #define cublasIcamax_64 cublasIcamax_v2_64
320
+ #define cublasIzamax_64 cublasIzamax_v2_64
321
+
322
+ #define cublasIsamin_64 cublasIsamin_v2_64
323
+ #define cublasIdamin_64 cublasIdamin_v2_64
324
+ #define cublasIcamin_64 cublasIcamin_v2_64
325
+ #define cublasIzamin_64 cublasIzamin_v2_64
326
+
327
+ #define cublasSasum_64 cublasSasum_v2_64
328
+ #define cublasDasum_64 cublasDasum_v2_64
329
+ #define cublasScasum_64 cublasScasum_v2_64
330
+ #define cublasDzasum_64 cublasDzasum_v2_64
331
+
332
+ #define cublasSrot_64 cublasSrot_v2_64
333
+ #define cublasDrot_64 cublasDrot_v2_64
334
+ #define cublasCrot_64 cublasCrot_v2_64
335
+ #define cublasCsrot_64 cublasCsrot_v2_64
336
+ #define cublasZrot_64 cublasZrot_v2_64
337
+ #define cublasZdrot_64 cublasZdrot_v2_64
338
+
339
+ #define cublasSrotg_64 cublasSrotg_v2_64
340
+ #define cublasDrotg_64 cublasDrotg_v2_64
341
+ #define cublasCrotg_64 cublasCrotg_v2_64
342
+ #define cublasZrotg_64 cublasZrotg_v2_64
343
+
344
+ #define cublasSrotm_64 cublasSrotm_v2_64
345
+ #define cublasDrotm_64 cublasDrotm_v2_64
346
+
347
+ #define cublasSrotmg_64 cublasSrotmg_v2_64
348
+ #define cublasDrotmg_64 cublasDrotmg_v2_64
349
+
350
+ /* Blas2 Routines */
351
+
352
+ #define cublasSgemv_64 cublasSgemv_v2_64
353
+ #define cublasDgemv_64 cublasDgemv_v2_64
354
+ #define cublasCgemv_64 cublasCgemv_v2_64
355
+ #define cublasZgemv_64 cublasZgemv_v2_64
356
+
357
+ #define cublasSgbmv_64 cublasSgbmv_v2_64
358
+ #define cublasDgbmv_64 cublasDgbmv_v2_64
359
+ #define cublasCgbmv_64 cublasCgbmv_v2_64
360
+ #define cublasZgbmv_64 cublasZgbmv_v2_64
361
+
362
+ #define cublasStrmv_64 cublasStrmv_v2_64
363
+ #define cublasDtrmv_64 cublasDtrmv_v2_64
364
+ #define cublasCtrmv_64 cublasCtrmv_v2_64
365
+ #define cublasZtrmv_64 cublasZtrmv_v2_64
366
+
367
+ #define cublasStbmv_64 cublasStbmv_v2_64
368
+ #define cublasDtbmv_64 cublasDtbmv_v2_64
369
+ #define cublasCtbmv_64 cublasCtbmv_v2_64
370
+ #define cublasZtbmv_64 cublasZtbmv_v2_64
371
+
372
+ #define cublasStpmv_64 cublasStpmv_v2_64
373
+ #define cublasDtpmv_64 cublasDtpmv_v2_64
374
+ #define cublasCtpmv_64 cublasCtpmv_v2_64
375
+ #define cublasZtpmv_64 cublasZtpmv_v2_64
376
+
377
+ #define cublasStrsv_64 cublasStrsv_v2_64
378
+ #define cublasDtrsv_64 cublasDtrsv_v2_64
379
+ #define cublasCtrsv_64 cublasCtrsv_v2_64
380
+ #define cublasZtrsv_64 cublasZtrsv_v2_64
381
+
382
+ #define cublasStpsv_64 cublasStpsv_v2_64
383
+ #define cublasDtpsv_64 cublasDtpsv_v2_64
384
+ #define cublasCtpsv_64 cublasCtpsv_v2_64
385
+ #define cublasZtpsv_64 cublasZtpsv_v2_64
386
+
387
+ #define cublasStbsv_64 cublasStbsv_v2_64
388
+ #define cublasDtbsv_64 cublasDtbsv_v2_64
389
+ #define cublasCtbsv_64 cublasCtbsv_v2_64
390
+ #define cublasZtbsv_64 cublasZtbsv_v2_64
391
+
392
+ #define cublasSsymv_64 cublasSsymv_v2_64
393
+ #define cublasDsymv_64 cublasDsymv_v2_64
394
+ #define cublasCsymv_64 cublasCsymv_v2_64
395
+ #define cublasZsymv_64 cublasZsymv_v2_64
396
+ #define cublasChemv_64 cublasChemv_v2_64
397
+ #define cublasZhemv_64 cublasZhemv_v2_64
398
+
399
+ #define cublasSsbmv_64 cublasSsbmv_v2_64
400
+ #define cublasDsbmv_64 cublasDsbmv_v2_64
401
+ #define cublasChbmv_64 cublasChbmv_v2_64
402
+ #define cublasZhbmv_64 cublasZhbmv_v2_64
403
+
404
+ #define cublasSspmv_64 cublasSspmv_v2_64
405
+ #define cublasDspmv_64 cublasDspmv_v2_64
406
+ #define cublasChpmv_64 cublasChpmv_v2_64
407
+ #define cublasZhpmv_64 cublasZhpmv_v2_64
408
+
409
+ #define cublasSger_64 cublasSger_v2_64
410
+ #define cublasDger_64 cublasDger_v2_64
411
+ #define cublasCgeru_64 cublasCgeru_v2_64
412
+ #define cublasCgerc_64 cublasCgerc_v2_64
413
+ #define cublasZgeru_64 cublasZgeru_v2_64
414
+ #define cublasZgerc_64 cublasZgerc_v2_64
415
+
416
+ #define cublasSsyr_64 cublasSsyr_v2_64
417
+ #define cublasDsyr_64 cublasDsyr_v2_64
418
+ #define cublasCsyr_64 cublasCsyr_v2_64
419
+ #define cublasZsyr_64 cublasZsyr_v2_64
420
+ #define cublasCher_64 cublasCher_v2_64
421
+ #define cublasZher_64 cublasZher_v2_64
422
+
423
+ #define cublasSspr_64 cublasSspr_v2_64
424
+ #define cublasDspr_64 cublasDspr_v2_64
425
+ #define cublasChpr_64 cublasChpr_v2_64
426
+ #define cublasZhpr_64 cublasZhpr_v2_64
427
+
428
+ #define cublasSsyr2_64 cublasSsyr2_v2_64
429
+ #define cublasDsyr2_64 cublasDsyr2_v2_64
430
+ #define cublasCsyr2_64 cublasCsyr2_v2_64
431
+ #define cublasZsyr2_64 cublasZsyr2_v2_64
432
+ #define cublasCher2_64 cublasCher2_v2_64
433
+ #define cublasZher2_64 cublasZher2_v2_64
434
+
435
+ #define cublasSspr2_64 cublasSspr2_v2_64
436
+ #define cublasDspr2_64 cublasDspr2_v2_64
437
+ #define cublasChpr2_64 cublasChpr2_v2_64
438
+ #define cublasZhpr2_64 cublasZhpr2_v2_64
439
+
440
+ /* Blas3 Routines */
441
+
442
+ #define cublasSgemm_64 cublasSgemm_v2_64
443
+ #define cublasDgemm_64 cublasDgemm_v2_64
444
+ #define cublasCgemm_64 cublasCgemm_v2_64
445
+ #define cublasZgemm_64 cublasZgemm_v2_64
446
+
447
+ #define cublasSsyrk_64 cublasSsyrk_v2_64
448
+ #define cublasDsyrk_64 cublasDsyrk_v2_64
449
+ #define cublasCsyrk_64 cublasCsyrk_v2_64
450
+ #define cublasZsyrk_64 cublasZsyrk_v2_64
451
+ #define cublasCherk_64 cublasCherk_v2_64
452
+ #define cublasZherk_64 cublasZherk_v2_64
453
+
454
+ #define cublasSsyr2k_64 cublasSsyr2k_v2_64
455
+ #define cublasDsyr2k_64 cublasDsyr2k_v2_64
456
+ #define cublasCsyr2k_64 cublasCsyr2k_v2_64
457
+ #define cublasZsyr2k_64 cublasZsyr2k_v2_64
458
+ #define cublasCher2k_64 cublasCher2k_v2_64
459
+ #define cublasZher2k_64 cublasZher2k_v2_64
460
+
461
+ #define cublasSsymm_64 cublasSsymm_v2_64
462
+ #define cublasDsymm_64 cublasDsymm_v2_64
463
+ #define cublasCsymm_64 cublasCsymm_v2_64
464
+ #define cublasZsymm_64 cublasZsymm_v2_64
465
+ #define cublasChemm_64 cublasChemm_v2_64
466
+ #define cublasZhemm_64 cublasZhemm_v2_64
467
+
468
+ #define cublasStrsm_64 cublasStrsm_v2_64
469
+ #define cublasDtrsm_64 cublasDtrsm_v2_64
470
+ #define cublasCtrsm_64 cublasCtrsm_v2_64
471
+ #define cublasZtrsm_64 cublasZtrsm_v2_64
472
+
473
+ #define cublasStrmm_64 cublasStrmm_v2_64
474
+ #define cublasDtrmm_64 cublasDtrmm_v2_64
475
+ #define cublasCtrmm_64 cublasCtrmm_v2_64
476
+ #define cublasZtrmm_64 cublasZtrmm_v2_64
477
+
478
+ #endif /* !defined(CUBLAS_V2_H_) */
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h ADDED
@@ -0,0 +1,824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(NVBLAS_H_)
51
+ #define NVBLAS_H_
52
+
53
+ #include "driver_types.h"
54
+ #include "cuComplex.h" /* import complex data type */
55
+
56
+ #if defined(__cplusplus)
57
+ extern "C" {
58
+ #endif
59
+
60
+ /* GEMM */
61
+ void sgemm_(const char* transa,
62
+ const char* transb,
63
+ const int* m,
64
+ const int* n,
65
+ const int* k,
66
+ const float* alpha,
67
+ const float* a,
68
+ const int* lda,
69
+ const float* b,
70
+ const int* ldb,
71
+ const float* beta,
72
+ float* c,
73
+ const int* ldc);
74
+
75
+ void dgemm_(const char* transa,
76
+ const char* transb,
77
+ const int* m,
78
+ const int* n,
79
+ const int* k,
80
+ const double* alpha,
81
+ const double* a,
82
+ const int* lda,
83
+ const double* b,
84
+ const int* ldb,
85
+ const double* beta,
86
+ double* c,
87
+ const int* ldc);
88
+
89
+ void cgemm_(const char* transa,
90
+ const char* transb,
91
+ const int* m,
92
+ const int* n,
93
+ const int* k,
94
+ const cuComplex* alpha,
95
+ const cuComplex* a,
96
+ const int* lda,
97
+ const cuComplex* b,
98
+ const int* ldb,
99
+ const cuComplex* beta,
100
+ cuComplex* c,
101
+ const int* ldc);
102
+
103
+ void zgemm_(const char* transa,
104
+ const char* transb,
105
+ const int* m,
106
+ const int* n,
107
+ const int* k,
108
+ const cuDoubleComplex* alpha,
109
+ const cuDoubleComplex* a,
110
+ const int* lda,
111
+ const cuDoubleComplex* b,
112
+ const int* ldb,
113
+ const cuDoubleComplex* beta,
114
+ cuDoubleComplex* c,
115
+ const int* ldc);
116
+
117
+ void sgemm(const char* transa,
118
+ const char* transb,
119
+ const int* m,
120
+ const int* n,
121
+ const int* k,
122
+ const float* alpha,
123
+ const float* a,
124
+ const int* lda,
125
+ const float* b,
126
+ const int* ldb,
127
+ const float* beta,
128
+ float* c,
129
+ const int* ldc);
130
+
131
+ void dgemm(const char* transa,
132
+ const char* transb,
133
+ const int* m,
134
+ const int* n,
135
+ const int* k,
136
+ const double* alpha,
137
+ const double* a,
138
+ const int* lda,
139
+ const double* b,
140
+ const int* ldb,
141
+ const double* beta,
142
+ double* c,
143
+ const int* ldc);
144
+
145
+ void cgemm(const char* transa,
146
+ const char* transb,
147
+ const int* m,
148
+ const int* n,
149
+ const int* k,
150
+ const cuComplex* alpha,
151
+ const cuComplex* a,
152
+ const int* lda,
153
+ const cuComplex* b,
154
+ const int* ldb,
155
+ const cuComplex* beta,
156
+ cuComplex* c,
157
+ const int* ldc);
158
+
159
+ void zgemm(const char* transa,
160
+ const char* transb,
161
+ const int* m,
162
+ const int* n,
163
+ const int* k,
164
+ const cuDoubleComplex* alpha,
165
+ const cuDoubleComplex* a,
166
+ const int* lda,
167
+ const cuDoubleComplex* b,
168
+ const int* ldb,
169
+ const cuDoubleComplex* beta,
170
+ cuDoubleComplex* c,
171
+ const int* ldc);
172
+
173
+ /* SYRK */
174
+ void ssyrk_(const char* uplo,
175
+ const char* trans,
176
+ const int* n,
177
+ const int* k,
178
+ const float* alpha,
179
+ const float* a,
180
+ const int* lda,
181
+ const float* beta,
182
+ float* c,
183
+ const int* ldc);
184
+
185
+ void dsyrk_(const char* uplo,
186
+ const char* trans,
187
+ const int* n,
188
+ const int* k,
189
+ const double* alpha,
190
+ const double* a,
191
+ const int* lda,
192
+ const double* beta,
193
+ double* c,
194
+ const int* ldc);
195
+
196
+ void csyrk_(const char* uplo,
197
+ const char* trans,
198
+ const int* n,
199
+ const int* k,
200
+ const cuComplex* alpha,
201
+ const cuComplex* a,
202
+ const int* lda,
203
+ const cuComplex* beta,
204
+ cuComplex* c,
205
+ const int* ldc);
206
+
207
+ void zsyrk_(const char* uplo,
208
+ const char* trans,
209
+ const int* n,
210
+ const int* k,
211
+ const cuDoubleComplex* alpha,
212
+ const cuDoubleComplex* a,
213
+ const int* lda,
214
+ const cuDoubleComplex* beta,
215
+ cuDoubleComplex* c,
216
+ const int* ldc);
217
+
218
+ void ssyrk(const char* uplo,
219
+ const char* trans,
220
+ const int* n,
221
+ const int* k,
222
+ const float* alpha,
223
+ const float* a,
224
+ const int* lda,
225
+ const float* beta,
226
+ float* c,
227
+ const int* ldc);
228
+
229
+ void dsyrk(const char* uplo,
230
+ const char* trans,
231
+ const int* n,
232
+ const int* k,
233
+ const double* alpha,
234
+ const double* a,
235
+ const int* lda,
236
+ const double* beta,
237
+ double* c,
238
+ const int* ldc);
239
+
240
+ void csyrk(const char* uplo,
241
+ const char* trans,
242
+ const int* n,
243
+ const int* k,
244
+ const cuComplex* alpha,
245
+ const cuComplex* a,
246
+ const int* lda,
247
+ const cuComplex* beta,
248
+ cuComplex* c,
249
+ const int* ldc);
250
+
251
+ void zsyrk(const char* uplo,
252
+ const char* trans,
253
+ const int* n,
254
+ const int* k,
255
+ const cuDoubleComplex* alpha,
256
+ const cuDoubleComplex* a,
257
+ const int* lda,
258
+ const cuDoubleComplex* beta,
259
+ cuDoubleComplex* c,
260
+ const int* ldc);
261
+
262
+ /* HERK */
263
+ void cherk_(const char* uplo,
264
+ const char* trans,
265
+ const int* n,
266
+ const int* k,
267
+ const float* alpha,
268
+ const cuComplex* a,
269
+ const int* lda,
270
+ const float* beta,
271
+ cuComplex* c,
272
+ const int* ldc);
273
+
274
+ void zherk_(const char* uplo,
275
+ const char* trans,
276
+ const int* n,
277
+ const int* k,
278
+ const double* alpha,
279
+ const cuDoubleComplex* a,
280
+ const int* lda,
281
+ const double* beta,
282
+ cuDoubleComplex* c,
283
+ const int* ldc);
284
+
285
+ void cherk(const char* uplo,
286
+ const char* trans,
287
+ const int* n,
288
+ const int* k,
289
+ const float* alpha,
290
+ const cuComplex* a,
291
+ const int* lda,
292
+ const float* beta,
293
+ cuComplex* c,
294
+ const int* ldc);
295
+
296
+ void zherk(const char* uplo,
297
+ const char* trans,
298
+ const int* n,
299
+ const int* k,
300
+ const double* alpha,
301
+ const cuDoubleComplex* a,
302
+ const int* lda,
303
+ const double* beta,
304
+ cuDoubleComplex* c,
305
+ const int* ldc);
306
+
307
+ /* TRSM */
308
+ void strsm_(const char* side,
309
+ const char* uplo,
310
+ const char* transa,
311
+ const char* diag,
312
+ const int* m,
313
+ const int* n,
314
+ const float* alpha,
315
+ const float* a,
316
+ const int* lda,
317
+ float* b,
318
+ const int* ldb);
319
+
320
+ void dtrsm_(const char* side,
321
+ const char* uplo,
322
+ const char* transa,
323
+ const char* diag,
324
+ const int* m,
325
+ const int* n,
326
+ const double* alpha,
327
+ const double* a,
328
+ const int* lda,
329
+ double* b,
330
+ const int* ldb);
331
+
332
+ void ctrsm_(const char* side,
333
+ const char* uplo,
334
+ const char* transa,
335
+ const char* diag,
336
+ const int* m,
337
+ const int* n,
338
+ const cuComplex* alpha,
339
+ const cuComplex* a,
340
+ const int* lda,
341
+ cuComplex* b,
342
+ const int* ldb);
343
+
344
+ void ztrsm_(const char* side,
345
+ const char* uplo,
346
+ const char* transa,
347
+ const char* diag,
348
+ const int* m,
349
+ const int* n,
350
+ const cuDoubleComplex* alpha,
351
+ const cuDoubleComplex* a,
352
+ const int* lda,
353
+ cuDoubleComplex* b,
354
+ const int* ldb);
355
+
356
+ void strsm(const char* side,
357
+ const char* uplo,
358
+ const char* transa,
359
+ const char* diag,
360
+ const int* m,
361
+ const int* n,
362
+ const float* alpha,
363
+ const float* a,
364
+ const int* lda,
365
+ float* b,
366
+ const int* ldb);
367
+
368
+ void dtrsm(const char* side,
369
+ const char* uplo,
370
+ const char* transa,
371
+ const char* diag,
372
+ const int* m,
373
+ const int* n,
374
+ const double* alpha,
375
+ const double* a,
376
+ const int* lda,
377
+ double* b,
378
+ const int* ldb);
379
+
380
+ void ctrsm(const char* side,
381
+ const char* uplo,
382
+ const char* transa,
383
+ const char* diag,
384
+ const int* m,
385
+ const int* n,
386
+ const cuComplex* alpha,
387
+ const cuComplex* a,
388
+ const int* lda,
389
+ cuComplex* b,
390
+ const int* ldb);
391
+
392
+ void ztrsm(const char* side,
393
+ const char* uplo,
394
+ const char* transa,
395
+ const char* diag,
396
+ const int* m,
397
+ const int* n,
398
+ const cuDoubleComplex* alpha,
399
+ const cuDoubleComplex* a,
400
+ const int* lda,
401
+ cuDoubleComplex* b,
402
+ const int* ldb);
403
+
404
+ /* SYMM */
405
+ void ssymm_(const char* side,
406
+ const char* uplo,
407
+ const int* m,
408
+ const int* n,
409
+ const float* alpha,
410
+ const float* a,
411
+ const int* lda,
412
+ const float* b,
413
+ const int* ldb,
414
+ const float* beta,
415
+ float* c,
416
+ const int* ldc);
417
+
418
+ void dsymm_(const char* side,
419
+ const char* uplo,
420
+ const int* m,
421
+ const int* n,
422
+ const double* alpha,
423
+ const double* a,
424
+ const int* lda,
425
+ const double* b,
426
+ const int* ldb,
427
+ const double* beta,
428
+ double* c,
429
+ const int* ldc);
430
+
431
+ void csymm_(const char* side,
432
+ const char* uplo,
433
+ const int* m,
434
+ const int* n,
435
+ const cuComplex* alpha,
436
+ const cuComplex* a,
437
+ const int* lda,
438
+ const cuComplex* b,
439
+ const int* ldb,
440
+ const cuComplex* beta,
441
+ cuComplex* c,
442
+ const int* ldc);
443
+
444
+ void zsymm_(const char* side,
445
+ const char* uplo,
446
+ const int* m,
447
+ const int* n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* a,
450
+ const int* lda,
451
+ const cuDoubleComplex* b,
452
+ const int* ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* c,
455
+ const int* ldc);
456
+
457
+ void ssymm(const char* side,
458
+ const char* uplo,
459
+ const int* m,
460
+ const int* n,
461
+ const float* alpha,
462
+ const float* a,
463
+ const int* lda,
464
+ const float* b,
465
+ const int* ldb,
466
+ const float* beta,
467
+ float* c,
468
+ const int* ldc);
469
+
470
+ void dsymm(const char* side,
471
+ const char* uplo,
472
+ const int* m,
473
+ const int* n,
474
+ const double* alpha,
475
+ const double* a,
476
+ const int* lda,
477
+ const double* b,
478
+ const int* ldb,
479
+ const double* beta,
480
+ double* c,
481
+ const int* ldc);
482
+
483
+ void csymm(const char* side,
484
+ const char* uplo,
485
+ const int* m,
486
+ const int* n,
487
+ const cuComplex* alpha,
488
+ const cuComplex* a,
489
+ const int* lda,
490
+ const cuComplex* b,
491
+ const int* ldb,
492
+ const cuComplex* beta,
493
+ cuComplex* c,
494
+ const int* ldc);
495
+
496
+ void zsymm(const char* side,
497
+ const char* uplo,
498
+ const int* m,
499
+ const int* n,
500
+ const cuDoubleComplex* alpha,
501
+ const cuDoubleComplex* a,
502
+ const int* lda,
503
+ const cuDoubleComplex* b,
504
+ const int* ldb,
505
+ const cuDoubleComplex* beta,
506
+ cuDoubleComplex* c,
507
+ const int* ldc);
508
+
509
+ /* HEMM */
510
+ void chemm_(const char* side,
511
+ const char* uplo,
512
+ const int* m,
513
+ const int* n,
514
+ const cuComplex* alpha,
515
+ const cuComplex* a,
516
+ const int* lda,
517
+ const cuComplex* b,
518
+ const int* ldb,
519
+ const cuComplex* beta,
520
+ cuComplex* c,
521
+ const int* ldc);
522
+
523
+ void zhemm_(const char* side,
524
+ const char* uplo,
525
+ const int* m,
526
+ const int* n,
527
+ const cuDoubleComplex* alpha,
528
+ const cuDoubleComplex* a,
529
+ const int* lda,
530
+ const cuDoubleComplex* b,
531
+ const int* ldb,
532
+ const cuDoubleComplex* beta,
533
+ cuDoubleComplex* c,
534
+ const int* ldc);
535
+
536
+ /* HEMM with no underscore*/
537
+ void chemm(const char* side,
538
+ const char* uplo,
539
+ const int* m,
540
+ const int* n,
541
+ const cuComplex* alpha,
542
+ const cuComplex* a,
543
+ const int* lda,
544
+ const cuComplex* b,
545
+ const int* ldb,
546
+ const cuComplex* beta,
547
+ cuComplex* c,
548
+ const int* ldc);
549
+
550
+ void zhemm(const char* side,
551
+ const char* uplo,
552
+ const int* m,
553
+ const int* n,
554
+ const cuDoubleComplex* alpha,
555
+ const cuDoubleComplex* a,
556
+ const int* lda,
557
+ const cuDoubleComplex* b,
558
+ const int* ldb,
559
+ const cuDoubleComplex* beta,
560
+ cuDoubleComplex* c,
561
+ const int* ldc);
562
+
563
+ /* SYR2K */
564
+ void ssyr2k_(const char* uplo,
565
+ const char* trans,
566
+ const int* n,
567
+ const int* k,
568
+ const float* alpha,
569
+ const float* a,
570
+ const int* lda,
571
+ const float* b,
572
+ const int* ldb,
573
+ const float* beta,
574
+ float* c,
575
+ const int* ldc);
576
+
577
+ void dsyr2k_(const char* uplo,
578
+ const char* trans,
579
+ const int* n,
580
+ const int* k,
581
+ const double* alpha,
582
+ const double* a,
583
+ const int* lda,
584
+ const double* b,
585
+ const int* ldb,
586
+ const double* beta,
587
+ double* c,
588
+ const int* ldc);
589
+
590
+ void csyr2k_(const char* uplo,
591
+ const char* trans,
592
+ const int* n,
593
+ const int* k,
594
+ const cuComplex* alpha,
595
+ const cuComplex* a,
596
+ const int* lda,
597
+ const cuComplex* b,
598
+ const int* ldb,
599
+ const cuComplex* beta,
600
+ cuComplex* c,
601
+ const int* ldc);
602
+
603
+ void zsyr2k_(const char* uplo,
604
+ const char* trans,
605
+ const int* n,
606
+ const int* k,
607
+ const cuDoubleComplex* alpha,
608
+ const cuDoubleComplex* a,
609
+ const int* lda,
610
+ const cuDoubleComplex* b,
611
+ const int* ldb,
612
+ const cuDoubleComplex* beta,
613
+ cuDoubleComplex* c,
614
+ const int* ldc);
615
+
616
+ /* SYR2K no_underscore*/
617
+ void ssyr2k(const char* uplo,
618
+ const char* trans,
619
+ const int* n,
620
+ const int* k,
621
+ const float* alpha,
622
+ const float* a,
623
+ const int* lda,
624
+ const float* b,
625
+ const int* ldb,
626
+ const float* beta,
627
+ float* c,
628
+ const int* ldc);
629
+
630
+ void dsyr2k(const char* uplo,
631
+ const char* trans,
632
+ const int* n,
633
+ const int* k,
634
+ const double* alpha,
635
+ const double* a,
636
+ const int* lda,
637
+ const double* b,
638
+ const int* ldb,
639
+ const double* beta,
640
+ double* c,
641
+ const int* ldc);
642
+
643
+ void csyr2k(const char* uplo,
644
+ const char* trans,
645
+ const int* n,
646
+ const int* k,
647
+ const cuComplex* alpha,
648
+ const cuComplex* a,
649
+ const int* lda,
650
+ const cuComplex* b,
651
+ const int* ldb,
652
+ const cuComplex* beta,
653
+ cuComplex* c,
654
+ const int* ldc);
655
+
656
+ void zsyr2k(const char* uplo,
657
+ const char* trans,
658
+ const int* n,
659
+ const int* k,
660
+ const cuDoubleComplex* alpha,
661
+ const cuDoubleComplex* a,
662
+ const int* lda,
663
+ const cuDoubleComplex* b,
664
+ const int* ldb,
665
+ const cuDoubleComplex* beta,
666
+ cuDoubleComplex* c,
667
+ const int* ldc);
668
+
669
+ /* HERK */
670
+ void cher2k_(const char* uplo,
671
+ const char* trans,
672
+ const int* n,
673
+ const int* k,
674
+ const cuComplex* alpha,
675
+ const cuComplex* a,
676
+ const int* lda,
677
+ const cuComplex* b,
678
+ const int* ldb,
679
+ const float* beta,
680
+ cuComplex* c,
681
+ const int* ldc);
682
+
683
+ void zher2k_(const char* uplo,
684
+ const char* trans,
685
+ const int* n,
686
+ const int* k,
687
+ const cuDoubleComplex* alpha,
688
+ const cuDoubleComplex* a,
689
+ const int* lda,
690
+ const cuDoubleComplex* b,
691
+ const int* ldb,
692
+ const double* beta,
693
+ cuDoubleComplex* c,
694
+ const int* ldc);
695
+
696
+ /* HER2K with no underscore */
697
+ void cher2k(const char* uplo,
698
+ const char* trans,
699
+ const int* n,
700
+ const int* k,
701
+ const cuComplex* alpha,
702
+ const cuComplex* a,
703
+ const int* lda,
704
+ const cuComplex* b,
705
+ const int* ldb,
706
+ const float* beta,
707
+ cuComplex* c,
708
+ const int* ldc);
709
+
710
+ void zher2k(const char* uplo,
711
+ const char* trans,
712
+ const int* n,
713
+ const int* k,
714
+ const cuDoubleComplex* alpha,
715
+ const cuDoubleComplex* a,
716
+ const int* lda,
717
+ const cuDoubleComplex* b,
718
+ const int* ldb,
719
+ const double* beta,
720
+ cuDoubleComplex* c,
721
+ const int* ldc);
722
+
723
+ /* TRMM */
724
+ void strmm_(const char* side,
725
+ const char* uplo,
726
+ const char* transa,
727
+ const char* diag,
728
+ const int* m,
729
+ const int* n,
730
+ const float* alpha,
731
+ const float* a,
732
+ const int* lda,
733
+ float* b,
734
+ const int* ldb);
735
+
736
+ void dtrmm_(const char* side,
737
+ const char* uplo,
738
+ const char* transa,
739
+ const char* diag,
740
+ const int* m,
741
+ const int* n,
742
+ const double* alpha,
743
+ const double* a,
744
+ const int* lda,
745
+ double* b,
746
+ const int* ldb);
747
+
748
+ void ctrmm_(const char* side,
749
+ const char* uplo,
750
+ const char* transa,
751
+ const char* diag,
752
+ const int* m,
753
+ const int* n,
754
+ const cuComplex* alpha,
755
+ const cuComplex* a,
756
+ const int* lda,
757
+ cuComplex* b,
758
+ const int* ldb);
759
+
760
+ void ztrmm_(const char* side,
761
+ const char* uplo,
762
+ const char* transa,
763
+ const char* diag,
764
+ const int* m,
765
+ const int* n,
766
+ const cuDoubleComplex* alpha,
767
+ const cuDoubleComplex* a,
768
+ const int* lda,
769
+ cuDoubleComplex* b,
770
+ const int* ldb);
771
+
772
+ void strmm(const char* side,
773
+ const char* uplo,
774
+ const char* transa,
775
+ const char* diag,
776
+ const int* m,
777
+ const int* n,
778
+ const float* alpha,
779
+ const float* a,
780
+ const int* lda,
781
+ float* b,
782
+ const int* ldb);
783
+
784
+ void dtrmm(const char* side,
785
+ const char* uplo,
786
+ const char* transa,
787
+ const char* diag,
788
+ const int* m,
789
+ const int* n,
790
+ const double* alpha,
791
+ const double* a,
792
+ const int* lda,
793
+ double* b,
794
+ const int* ldb);
795
+
796
+ void ctrmm(const char* side,
797
+ const char* uplo,
798
+ const char* transa,
799
+ const char* diag,
800
+ const int* m,
801
+ const int* n,
802
+ const cuComplex* alpha,
803
+ const cuComplex* a,
804
+ const int* lda,
805
+ cuComplex* b,
806
+ const int* ldb);
807
+
808
+ void ztrmm(const char* side,
809
+ const char* uplo,
810
+ const char* transa,
811
+ const char* diag,
812
+ const int* m,
813
+ const int* n,
814
+ const cuDoubleComplex* alpha,
815
+ const cuDoubleComplex* a,
816
+ const int* lda,
817
+ cuDoubleComplex* b,
818
+ const int* ldb);
819
+
820
+ #if defined(__cplusplus)
821
+ }
822
+ #endif /* __cplusplus */
823
+
824
+ #endif /* !defined(NVBLAS_H_) */
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 ADDED
Binary file (737 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+
52
+ #if !defined(_CUPTI_OPENACC_H_)
53
+ #define _CUPTI_OPENACC_H_
54
+
55
+ #ifndef CUPTIAPI
56
+ #ifdef _WIN32
57
+ #define CUPTIAPI __stdcall
58
+ #else
59
+ #define CUPTIAPI
60
+ #endif
61
+ #endif
62
+
63
+ #if defined(__LP64__)
64
+ #define CUPTILP64 1
65
+ #elif defined(_WIN64)
66
+ #define CUPTILP64 1
67
+ #else
68
+ #undef CUPTILP64
69
+ #endif
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif
74
+
75
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
76
+ #pragma GCC visibility push(default)
77
+ #endif
78
+
79
+ /**
80
+ * \brief Initialize OpenACC support
81
+ *
82
+ * \param profRegister function of type acc_prof_reg as obtained from acc_register_library
83
+ * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library
84
+ * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library
85
+ */
86
+ CUptiResult CUPTIAPI
87
+ cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup);
88
+
89
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
90
+ #pragma GCC visibility pop
91
+ #endif
92
+
93
+ #if defined(__cplusplus)
94
+ }
95
+ #endif
96
+
97
+ #endif /*_CUPTI_OPENACC_H_*/
98
+
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+ #include "Openmp/omp-tools.h"
52
+
53
+ #if !defined(_CUPTI_OPENMP_H_)
54
+ #define _CUPTI_OPENMP_H_
55
+
56
+ #ifndef CUPTIAPI
57
+ #ifdef _WIN32
58
+ #define CUPTIAPI __stdcall
59
+ #else
60
+ #define CUPTIAPI
61
+ #endif
62
+ #endif
63
+
64
+ #if defined(__LP64__)
65
+ #define CUPTILP64 1
66
+ #elif defined(_WIN64)
67
+ #define CUPTILP64 1
68
+ #else
69
+ #undef CUPTILP64
70
+ #endif
71
+
72
+ #if defined(__cplusplus)
73
+ extern "C" {
74
+ #endif
75
+
76
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
77
+ #pragma GCC visibility push(default)
78
+ #endif
79
+
80
+ /**
81
+ * \brief Initialize OPENMP support (deprecated, used before OpenMP 5.0)
82
+ *
83
+ */
84
+ int CUPTIAPI cuptiOpenMpInitialize(ompt_function_lookup_t ompt_fn_lookup, const char *runtime_version, unsigned int ompt_version);
85
+
86
+ /**
87
+ * \brief Initialize OPENMP support
88
+ *
89
+ */
90
+ int CUPTIAPI cuptiOpenMpInitialize_v2(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data);
91
+
92
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
93
+ #pragma GCC visibility pop
94
+ #endif
95
+
96
+ #if defined(__cplusplus)
97
+ }
98
+ #endif
99
+
100
+ #endif /*_CUPTI_OPENMP_H_*/
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h ADDED
@@ -0,0 +1,1083 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * include/50/omp-tools.h.var
3
+ */
4
+
5
+ //===----------------------------------------------------------------------===//
6
+ //
7
+ // The LLVM Compiler Infrastructure
8
+ //
9
+ // This file is dual licensed under the MIT and the University of Illinois Open
10
+ // Source Licenses. See LICENSE.txt for details.
11
+ //
12
+ //===----------------------------------------------------------------------===//
13
+
14
+ #ifndef __OMPT__
15
+ #define __OMPT__
16
+
17
+ /*****************************************************************************
18
+ * system include files
19
+ *****************************************************************************/
20
+
21
+ #include <stdint.h>
22
+ #include <stddef.h>
23
+
24
+ /*****************************************************************************
25
+ * iteration macros
26
+ *****************************************************************************/
27
+
28
+ #define FOREACH_OMPT_INQUIRY_FN(macro) \
29
+ macro (ompt_enumerate_states) \
30
+ macro (ompt_enumerate_mutex_impls) \
31
+ \
32
+ macro (ompt_set_callback) \
33
+ macro (ompt_get_callback) \
34
+ \
35
+ macro (ompt_get_state) \
36
+ \
37
+ macro (ompt_get_parallel_info) \
38
+ macro (ompt_get_task_info) \
39
+ macro (ompt_get_task_memory) \
40
+ macro (ompt_get_thread_data) \
41
+ macro (ompt_get_unique_id) \
42
+ macro (ompt_finalize_tool) \
43
+ \
44
+ macro(ompt_get_num_procs) \
45
+ macro(ompt_get_num_places) \
46
+ macro(ompt_get_place_proc_ids) \
47
+ macro(ompt_get_place_num) \
48
+ macro(ompt_get_partition_place_nums) \
49
+ macro(ompt_get_proc_id) \
50
+ \
51
+ macro(ompt_get_target_info) \
52
+ macro(ompt_get_num_devices)
53
+
54
+ #define FOREACH_OMPT_STATE(macro) \
55
+ \
56
+ /* first available state */ \
57
+ macro (ompt_state_undefined, 0x102) /* undefined thread state */ \
58
+ \
59
+ /* work states (0..15) */ \
60
+ macro (ompt_state_work_serial, 0x000) /* working outside parallel */ \
61
+ macro (ompt_state_work_parallel, 0x001) /* working within parallel */ \
62
+ macro (ompt_state_work_reduction, 0x002) /* performing a reduction */ \
63
+ \
64
+ /* barrier wait states (16..31) */ \
65
+ macro (ompt_state_wait_barrier, 0x010) /* waiting at a barrier */ \
66
+ macro (ompt_state_wait_barrier_implicit_parallel, 0x011) \
67
+ /* implicit barrier at the end of parallel region */\
68
+ macro (ompt_state_wait_barrier_implicit_workshare, 0x012) \
69
+ /* implicit barrier at the end of worksharing */ \
70
+ macro (ompt_state_wait_barrier_implicit, 0x013) /* implicit barrier */ \
71
+ macro (ompt_state_wait_barrier_explicit, 0x014) /* explicit barrier */ \
72
+ \
73
+ /* task wait states (32..63) */ \
74
+ macro (ompt_state_wait_taskwait, 0x020) /* waiting at a taskwait */ \
75
+ macro (ompt_state_wait_taskgroup, 0x021) /* waiting at a taskgroup */ \
76
+ \
77
+ /* mutex wait states (64..127) */ \
78
+ macro (ompt_state_wait_mutex, 0x040) \
79
+ macro (ompt_state_wait_lock, 0x041) /* waiting for lock */ \
80
+ macro (ompt_state_wait_critical, 0x042) /* waiting for critical */ \
81
+ macro (ompt_state_wait_atomic, 0x043) /* waiting for atomic */ \
82
+ macro (ompt_state_wait_ordered, 0x044) /* waiting for ordered */ \
83
+ \
84
+ /* target wait states (128..255) */ \
85
+ macro (ompt_state_wait_target, 0x080) /* waiting for target region */ \
86
+ macro (ompt_state_wait_target_map, 0x081) /* waiting for target data mapping operation */ \
87
+ macro (ompt_state_wait_target_update, 0x082) /* waiting for target update operation */ \
88
+ \
89
+ /* misc (256..511) */ \
90
+ macro (ompt_state_idle, 0x100) /* waiting for work */ \
91
+ macro (ompt_state_overhead, 0x101) /* overhead excluding wait states */ \
92
+ \
93
+ /* implementation-specific states (512..) */
94
+
95
+
96
+ #define FOREACH_KMP_MUTEX_IMPL(macro) \
97
+ macro (kmp_mutex_impl_none, 0) /* unknown implementation */ \
98
+ macro (kmp_mutex_impl_spin, 1) /* based on spin */ \
99
+ macro (kmp_mutex_impl_queuing, 2) /* based on some fair policy */ \
100
+ macro (kmp_mutex_impl_speculative, 3) /* based on HW-supported speculation */
101
+
102
+ #define FOREACH_OMPT_EVENT(macro) \
103
+ \
104
+ /*--- Mandatory Events ---*/ \
105
+ macro (ompt_callback_thread_begin, ompt_callback_thread_begin_t, 1) /* thread begin */ \
106
+ macro (ompt_callback_thread_end, ompt_callback_thread_end_t, 2) /* thread end */ \
107
+ \
108
+ macro (ompt_callback_parallel_begin, ompt_callback_parallel_begin_t, 3) /* parallel begin */ \
109
+ macro (ompt_callback_parallel_end, ompt_callback_parallel_end_t, 4) /* parallel end */ \
110
+ \
111
+ macro (ompt_callback_task_create, ompt_callback_task_create_t, 5) /* task begin */ \
112
+ macro (ompt_callback_task_schedule, ompt_callback_task_schedule_t, 6) /* task schedule */ \
113
+ macro (ompt_callback_implicit_task, ompt_callback_implicit_task_t, 7) /* implicit task */ \
114
+ \
115
+ macro (ompt_callback_target, ompt_callback_target_t, 8) /* target */ \
116
+ macro (ompt_callback_target_data_op, ompt_callback_target_data_op_t, 9) /* target data op */ \
117
+ macro (ompt_callback_target_submit, ompt_callback_target_submit_t, 10) /* target submit */ \
118
+ \
119
+ macro (ompt_callback_control_tool, ompt_callback_control_tool_t, 11) /* control tool */ \
120
+ \
121
+ macro (ompt_callback_device_initialize, ompt_callback_device_initialize_t, 12) /* device initialize */ \
122
+ macro (ompt_callback_device_finalize, ompt_callback_device_finalize_t, 13) /* device finalize */ \
123
+ \
124
+ macro (ompt_callback_device_load, ompt_callback_device_load_t, 14) /* device load */ \
125
+ macro (ompt_callback_device_unload, ompt_callback_device_unload_t, 15) /* device unload */ \
126
+ \
127
+ /* Optional Events */ \
128
+ macro (ompt_callback_sync_region_wait, ompt_callback_sync_region_t, 16) /* sync region wait begin or end */ \
129
+ \
130
+ macro (ompt_callback_mutex_released, ompt_callback_mutex_t, 17) /* mutex released */ \
131
+ \
132
+ macro (ompt_callback_dependences, ompt_callback_dependences_t, 18) /* report task dependences */ \
133
+ macro (ompt_callback_task_dependence, ompt_callback_task_dependence_t, 19) /* report task dependence */ \
134
+ \
135
+ macro (ompt_callback_work, ompt_callback_work_t, 20) /* task at work begin or end */ \
136
+ \
137
+ macro (ompt_callback_master, ompt_callback_master_t, 21) /* task at master begin or end */ \
138
+ \
139
+ macro (ompt_callback_target_map, ompt_callback_target_map_t, 22) /* target map */ \
140
+ \
141
+ macro (ompt_callback_sync_region, ompt_callback_sync_region_t, 23) /* sync region begin or end */ \
142
+ \
143
+ macro (ompt_callback_lock_init, ompt_callback_mutex_acquire_t, 24) /* lock init */ \
144
+ macro (ompt_callback_lock_destroy, ompt_callback_mutex_t, 25) /* lock destroy */ \
145
+ \
146
+ macro (ompt_callback_mutex_acquire, ompt_callback_mutex_acquire_t, 26) /* mutex acquire */ \
147
+ macro (ompt_callback_mutex_acquired, ompt_callback_mutex_t, 27) /* mutex acquired */ \
148
+ \
149
+ macro (ompt_callback_nest_lock, ompt_callback_nest_lock_t, 28) /* nest lock */ \
150
+ \
151
+ macro (ompt_callback_flush, ompt_callback_flush_t, 29) /* after executing flush */ \
152
+ \
153
+ macro (ompt_callback_cancel, ompt_callback_cancel_t, 30) /* cancel innermost binding region */ \
154
+ \
155
+ macro (ompt_callback_reduction, ompt_callback_sync_region_t, 31) /* reduction */ \
156
+ \
157
+ macro (ompt_callback_dispatch, ompt_callback_dispatch_t, 32) /* dispatch of work */
158
+
159
+ /*****************************************************************************
160
+ * implementation specific types
161
+ *****************************************************************************/
162
+
163
+ typedef enum kmp_mutex_impl_t {
164
+ #define kmp_mutex_impl_macro(impl, code) impl = code,
165
+ FOREACH_KMP_MUTEX_IMPL(kmp_mutex_impl_macro)
166
+ #undef kmp_mutex_impl_macro
167
+ } kmp_mutex_impl_t;
168
+
169
+ /*****************************************************************************
170
+ * definitions generated from spec
171
+ *****************************************************************************/
172
+
173
+ typedef enum ompt_callbacks_t {
174
+ ompt_callback_thread_begin = 1,
175
+ ompt_callback_thread_end = 2,
176
+ ompt_callback_parallel_begin = 3,
177
+ ompt_callback_parallel_end = 4,
178
+ ompt_callback_task_create = 5,
179
+ ompt_callback_task_schedule = 6,
180
+ ompt_callback_implicit_task = 7,
181
+ ompt_callback_target = 8,
182
+ ompt_callback_target_data_op = 9,
183
+ ompt_callback_target_submit = 10,
184
+ ompt_callback_control_tool = 11,
185
+ ompt_callback_device_initialize = 12,
186
+ ompt_callback_device_finalize = 13,
187
+ ompt_callback_device_load = 14,
188
+ ompt_callback_device_unload = 15,
189
+ ompt_callback_sync_region_wait = 16,
190
+ ompt_callback_mutex_released = 17,
191
+ ompt_callback_dependences = 18,
192
+ ompt_callback_task_dependence = 19,
193
+ ompt_callback_work = 20,
194
+ ompt_callback_master = 21,
195
+ ompt_callback_target_map = 22,
196
+ ompt_callback_sync_region = 23,
197
+ ompt_callback_lock_init = 24,
198
+ ompt_callback_lock_destroy = 25,
199
+ ompt_callback_mutex_acquire = 26,
200
+ ompt_callback_mutex_acquired = 27,
201
+ ompt_callback_nest_lock = 28,
202
+ ompt_callback_flush = 29,
203
+ ompt_callback_cancel = 30,
204
+ ompt_callback_reduction = 31,
205
+ ompt_callback_dispatch = 32
206
+ } ompt_callbacks_t;
207
+
208
+ typedef enum ompt_record_t {
209
+ ompt_record_ompt = 1,
210
+ ompt_record_native = 2,
211
+ ompt_record_invalid = 3
212
+ } ompt_record_t;
213
+
214
+ typedef enum ompt_record_native_t {
215
+ ompt_record_native_info = 1,
216
+ ompt_record_native_event = 2
217
+ } ompt_record_native_t;
218
+
219
+ typedef enum ompt_set_result_t {
220
+ ompt_set_error = 0,
221
+ ompt_set_never = 1,
222
+ ompt_set_impossible = 2,
223
+ ompt_set_sometimes = 3,
224
+ ompt_set_sometimes_paired = 4,
225
+ ompt_set_always = 5
226
+ } ompt_set_result_t;
227
+
228
+ typedef uint64_t ompt_id_t;
229
+
230
+ typedef uint64_t ompt_device_time_t;
231
+
232
+ typedef uint64_t ompt_buffer_cursor_t;
233
+
234
+ typedef enum ompt_thread_t {
235
+ ompt_thread_initial = 1,
236
+ ompt_thread_worker = 2,
237
+ ompt_thread_other = 3,
238
+ ompt_thread_unknown = 4
239
+ } ompt_thread_t;
240
+
241
+ typedef enum ompt_scope_endpoint_t {
242
+ ompt_scope_begin = 1,
243
+ ompt_scope_end = 2
244
+ } ompt_scope_endpoint_t;
245
+
246
+ typedef enum ompt_dispatch_t {
247
+ ompt_dispatch_iteration = 1,
248
+ ompt_dispatch_section = 2
249
+ } ompt_dispatch_t;
250
+
251
+ typedef enum ompt_sync_region_t {
252
+ ompt_sync_region_barrier = 1,
253
+ ompt_sync_region_barrier_implicit = 2,
254
+ ompt_sync_region_barrier_explicit = 3,
255
+ ompt_sync_region_barrier_implementation = 4,
256
+ ompt_sync_region_taskwait = 5,
257
+ ompt_sync_region_taskgroup = 6,
258
+ ompt_sync_region_reduction = 7
259
+ } ompt_sync_region_t;
260
+
261
+ typedef enum ompt_target_data_op_t {
262
+ ompt_target_data_alloc = 1,
263
+ ompt_target_data_transfer_to_device = 2,
264
+ ompt_target_data_transfer_from_device = 3,
265
+ ompt_target_data_delete = 4,
266
+ ompt_target_data_associate = 5,
267
+ ompt_target_data_disassociate = 6
268
+ } ompt_target_data_op_t;
269
+
270
+ typedef enum ompt_work_t {
271
+ ompt_work_loop = 1,
272
+ ompt_work_sections = 2,
273
+ ompt_work_single_executor = 3,
274
+ ompt_work_single_other = 4,
275
+ ompt_work_workshare = 5,
276
+ ompt_work_distribute = 6,
277
+ ompt_work_taskloop = 7
278
+ } ompt_work_t;
279
+
280
+ typedef enum ompt_mutex_t {
281
+ ompt_mutex_lock = 1,
282
+ ompt_mutex_test_lock = 2,
283
+ ompt_mutex_nest_lock = 3,
284
+ ompt_mutex_test_nest_lock = 4,
285
+ ompt_mutex_critical = 5,
286
+ ompt_mutex_atomic = 6,
287
+ ompt_mutex_ordered = 7
288
+ } ompt_mutex_t;
289
+
290
+ typedef enum ompt_native_mon_flag_t {
291
+ ompt_native_data_motion_explicit = 0x01,
292
+ ompt_native_data_motion_implicit = 0x02,
293
+ ompt_native_kernel_invocation = 0x04,
294
+ ompt_native_kernel_execution = 0x08,
295
+ ompt_native_driver = 0x10,
296
+ ompt_native_runtime = 0x20,
297
+ ompt_native_overhead = 0x40,
298
+ ompt_native_idleness = 0x80
299
+ } ompt_native_mon_flag_t;
300
+
301
+ typedef enum ompt_task_flag_t {
302
+ ompt_task_initial = 0x00000001,
303
+ ompt_task_implicit = 0x00000002,
304
+ ompt_task_explicit = 0x00000004,
305
+ ompt_task_target = 0x00000008,
306
+ ompt_task_undeferred = 0x08000000,
307
+ ompt_task_untied = 0x10000000,
308
+ ompt_task_final = 0x20000000,
309
+ ompt_task_mergeable = 0x40000000,
310
+ ompt_task_merged = 0x80000000
311
+ } ompt_task_flag_t;
312
+
313
+ typedef enum ompt_task_status_t {
314
+ ompt_task_complete = 1,
315
+ ompt_task_yield = 2,
316
+ ompt_task_cancel = 3,
317
+ ompt_task_detach = 4,
318
+ ompt_task_early_fulfill = 5,
319
+ ompt_task_late_fulfill = 6,
320
+ ompt_task_switch = 7
321
+ } ompt_task_status_t;
322
+
323
+ typedef enum ompt_target_t {
324
+ ompt_target = 1,
325
+ ompt_target_enter_data = 2,
326
+ ompt_target_exit_data = 3,
327
+ ompt_target_update = 4
328
+ } ompt_target_t;
329
+
330
+ typedef enum ompt_parallel_flag_t {
331
+ ompt_parallel_invoker_program = 0x00000001,
332
+ ompt_parallel_invoker_runtime = 0x00000002,
333
+ ompt_parallel_league = 0x40000000,
334
+ ompt_parallel_team = 0x80000000
335
+ } ompt_parallel_flag_t;
336
+
337
+ typedef enum ompt_target_map_flag_t {
338
+ ompt_target_map_flag_to = 0x01,
339
+ ompt_target_map_flag_from = 0x02,
340
+ ompt_target_map_flag_alloc = 0x04,
341
+ ompt_target_map_flag_release = 0x08,
342
+ ompt_target_map_flag_delete = 0x10,
343
+ ompt_target_map_flag_implicit = 0x20
344
+ } ompt_target_map_flag_t;
345
+
346
+ typedef enum ompt_dependence_type_t {
347
+ ompt_dependence_type_in = 1,
348
+ ompt_dependence_type_out = 2,
349
+ ompt_dependence_type_inout = 3,
350
+ ompt_dependence_type_mutexinoutset = 4,
351
+ ompt_dependence_type_source = 5,
352
+ ompt_dependence_type_sink = 6
353
+ } ompt_dependence_type_t;
354
+
355
+ typedef enum ompt_cancel_flag_t {
356
+ ompt_cancel_parallel = 0x01,
357
+ ompt_cancel_sections = 0x02,
358
+ ompt_cancel_loop = 0x04,
359
+ ompt_cancel_taskgroup = 0x08,
360
+ ompt_cancel_activated = 0x10,
361
+ ompt_cancel_detected = 0x20,
362
+ ompt_cancel_discarded_task = 0x40
363
+ } ompt_cancel_flag_t;
364
+
365
+ typedef uint64_t ompt_hwid_t;
366
+
367
+ typedef uint64_t ompt_wait_id_t;
368
+
369
+ typedef enum ompt_frame_flag_t {
370
+ ompt_frame_runtime = 0x00,
371
+ ompt_frame_application = 0x01,
372
+ ompt_frame_cfa = 0x10,
373
+ ompt_frame_framepointer = 0x20,
374
+ ompt_frame_stackaddress = 0x30
375
+ } ompt_frame_flag_t;
376
+
377
+ typedef enum ompt_state_t {
378
+ ompt_state_work_serial = 0x000,
379
+ ompt_state_work_parallel = 0x001,
380
+ ompt_state_work_reduction = 0x002,
381
+
382
+ ompt_state_wait_barrier = 0x010,
383
+ ompt_state_wait_barrier_implicit_parallel = 0x011,
384
+ ompt_state_wait_barrier_implicit_workshare = 0x012,
385
+ ompt_state_wait_barrier_implicit = 0x013,
386
+ ompt_state_wait_barrier_explicit = 0x014,
387
+
388
+ ompt_state_wait_taskwait = 0x020,
389
+ ompt_state_wait_taskgroup = 0x021,
390
+
391
+ ompt_state_wait_mutex = 0x040,
392
+ ompt_state_wait_lock = 0x041,
393
+ ompt_state_wait_critical = 0x042,
394
+ ompt_state_wait_atomic = 0x043,
395
+ ompt_state_wait_ordered = 0x044,
396
+
397
+ ompt_state_wait_target = 0x080,
398
+ ompt_state_wait_target_map = 0x081,
399
+ ompt_state_wait_target_update = 0x082,
400
+
401
+ ompt_state_idle = 0x100,
402
+ ompt_state_overhead = 0x101,
403
+ ompt_state_undefined = 0x102
404
+ } ompt_state_t;
405
+
406
+ typedef uint64_t (*ompt_get_unique_id_t) (void);
407
+
408
+ typedef uint64_t ompd_size_t;
409
+
410
+ typedef uint64_t ompd_wait_id_t;
411
+
412
+ typedef uint64_t ompd_addr_t;
413
+ typedef int64_t ompd_word_t;
414
+ typedef uint64_t ompd_seg_t;
415
+
416
+ typedef uint64_t ompd_device_t;
417
+
418
+ typedef uint64_t ompd_thread_id_t;
419
+
420
+ typedef enum ompd_scope_t {
421
+ ompd_scope_global = 1,
422
+ ompd_scope_address_space = 2,
423
+ ompd_scope_thread = 3,
424
+ ompd_scope_parallel = 4,
425
+ ompd_scope_implicit_task = 5,
426
+ ompd_scope_task = 6
427
+ } ompd_scope_t;
428
+
429
+ typedef uint64_t ompd_icv_id_t;
430
+
431
+ typedef enum ompd_rc_t {
432
+ ompd_rc_ok = 0,
433
+ ompd_rc_unavailable = 1,
434
+ ompd_rc_stale_handle = 2,
435
+ ompd_rc_bad_input = 3,
436
+ ompd_rc_error = 4,
437
+ ompd_rc_unsupported = 5,
438
+ ompd_rc_needs_state_tracking = 6,
439
+ ompd_rc_incompatible = 7,
440
+ ompd_rc_device_read_error = 8,
441
+ ompd_rc_device_write_error = 9,
442
+ ompd_rc_nomem = 10,
443
+ } ompd_rc_t;
444
+
445
+ typedef void (*ompt_interface_fn_t) (void);
446
+
447
+ typedef ompt_interface_fn_t (*ompt_function_lookup_t) (
448
+ const char *interface_function_name
449
+ );
450
+
451
+ typedef union ompt_data_t {
452
+ uint64_t value;
453
+ void *ptr;
454
+ } ompt_data_t;
455
+
456
+ typedef struct ompt_frame_t {
457
+ ompt_data_t exit_frame;
458
+ ompt_data_t enter_frame;
459
+ int exit_frame_flags;
460
+ int enter_frame_flags;
461
+ } ompt_frame_t;
462
+
463
+ typedef void (*ompt_callback_t) (void);
464
+
465
+ typedef void ompt_device_t;
466
+
467
+ typedef void ompt_buffer_t;
468
+
469
+ typedef void (*ompt_callback_buffer_request_t) (
470
+ int device_num,
471
+ ompt_buffer_t **buffer,
472
+ size_t *bytes
473
+ );
474
+
475
+ typedef void (*ompt_callback_buffer_complete_t) (
476
+ int device_num,
477
+ ompt_buffer_t *buffer,
478
+ size_t bytes,
479
+ ompt_buffer_cursor_t begin,
480
+ int buffer_owned
481
+ );
482
+
483
+ typedef void (*ompt_finalize_t) (
484
+ ompt_data_t *tool_data
485
+ );
486
+
487
+ typedef int (*ompt_initialize_t) (
488
+ ompt_function_lookup_t lookup,
489
+ int initial_device_num,
490
+ ompt_data_t *tool_data
491
+ );
492
+
493
+ typedef struct ompt_start_tool_result_t {
494
+ ompt_initialize_t initialize;
495
+ ompt_finalize_t finalize;
496
+ ompt_data_t tool_data;
497
+ } ompt_start_tool_result_t;
498
+
499
+ typedef struct ompt_record_abstract_t {
500
+ ompt_record_native_t rclass;
501
+ const char *type;
502
+ ompt_device_time_t start_time;
503
+ ompt_device_time_t end_time;
504
+ ompt_hwid_t hwid;
505
+ } ompt_record_abstract_t;
506
+
507
+ typedef struct ompt_dependence_t {
508
+ ompt_data_t variable;
509
+ ompt_dependence_type_t dependence_type;
510
+ } ompt_dependence_t;
511
+
512
+ typedef int (*ompt_enumerate_states_t) (
513
+ int current_state,
514
+ int *next_state,
515
+ const char **next_state_name
516
+ );
517
+
518
+ typedef int (*ompt_enumerate_mutex_impls_t) (
519
+ int current_impl,
520
+ int *next_impl,
521
+ const char **next_impl_name
522
+ );
523
+
524
+ typedef ompt_set_result_t (*ompt_set_callback_t) (
525
+ ompt_callbacks_t event,
526
+ ompt_callback_t callback
527
+ );
528
+
529
+ typedef int (*ompt_get_callback_t) (
530
+ ompt_callbacks_t event,
531
+ ompt_callback_t *callback
532
+ );
533
+
534
+ typedef ompt_data_t *(*ompt_get_thread_data_t) (void);
535
+
536
+ typedef int (*ompt_get_num_procs_t) (void);
537
+
538
+ typedef int (*ompt_get_num_places_t) (void);
539
+
540
+ typedef int (*ompt_get_place_proc_ids_t) (
541
+ int place_num,
542
+ int ids_size,
543
+ int *ids
544
+ );
545
+
546
+ typedef int (*ompt_get_place_num_t) (void);
547
+
548
+ typedef int (*ompt_get_partition_place_nums_t) (
549
+ int place_nums_size,
550
+ int *place_nums
551
+ );
552
+
553
+ typedef int (*ompt_get_proc_id_t) (void);
554
+
555
+ typedef int (*ompt_get_state_t) (
556
+ ompt_wait_id_t *wait_id
557
+ );
558
+
559
+ typedef int (*ompt_get_parallel_info_t) (
560
+ int ancestor_level,
561
+ ompt_data_t **parallel_data,
562
+ int *team_size
563
+ );
564
+
565
+ typedef int (*ompt_get_task_info_t) (
566
+ int ancestor_level,
567
+ int *flags,
568
+ ompt_data_t **task_data,
569
+ ompt_frame_t **task_frame,
570
+ ompt_data_t **parallel_data,
571
+ int *thread_num
572
+ );
573
+
574
+ typedef int (*ompt_get_task_memory_t)(
575
+ void **addr,
576
+ size_t *size,
577
+ int block
578
+ );
579
+
580
+ typedef int (*ompt_get_target_info_t) (
581
+ uint64_t *device_num,
582
+ ompt_id_t *target_id,
583
+ ompt_id_t *host_op_id
584
+ );
585
+
586
+ typedef int (*ompt_get_num_devices_t) (void);
587
+
588
+ typedef void (*ompt_finalize_tool_t) (void);
589
+
590
+ typedef int (*ompt_get_device_num_procs_t) (
591
+ ompt_device_t *device
592
+ );
593
+
594
+ typedef ompt_device_time_t (*ompt_get_device_time_t) (
595
+ ompt_device_t *device
596
+ );
597
+
598
+ typedef double (*ompt_translate_time_t) (
599
+ ompt_device_t *device,
600
+ ompt_device_time_t time
601
+ );
602
+
603
+ typedef ompt_set_result_t (*ompt_set_trace_ompt_t) (
604
+ ompt_device_t *device,
605
+ unsigned int enable,
606
+ unsigned int etype
607
+ );
608
+
609
+ typedef ompt_set_result_t (*ompt_set_trace_native_t) (
610
+ ompt_device_t *device,
611
+ int enable,
612
+ int flags
613
+ );
614
+
615
+ typedef int (*ompt_start_trace_t) (
616
+ ompt_device_t *device,
617
+ ompt_callback_buffer_request_t request,
618
+ ompt_callback_buffer_complete_t complete
619
+ );
620
+
621
+ typedef int (*ompt_pause_trace_t) (
622
+ ompt_device_t *device,
623
+ int begin_pause
624
+ );
625
+
626
+ typedef int (*ompt_flush_trace_t) (
627
+ ompt_device_t *device
628
+ );
629
+
630
+ typedef int (*ompt_stop_trace_t) (
631
+ ompt_device_t *device
632
+ );
633
+
634
+ typedef int (*ompt_advance_buffer_cursor_t) (
635
+ ompt_device_t *device,
636
+ ompt_buffer_t *buffer,
637
+ size_t size,
638
+ ompt_buffer_cursor_t current,
639
+ ompt_buffer_cursor_t *next
640
+ );
641
+
642
+ typedef ompt_record_t (*ompt_get_record_type_t) (
643
+ ompt_buffer_t *buffer,
644
+ ompt_buffer_cursor_t current
645
+ );
646
+
647
+ typedef void *(*ompt_get_record_native_t) (
648
+ ompt_buffer_t *buffer,
649
+ ompt_buffer_cursor_t current,
650
+ ompt_id_t *host_op_id
651
+ );
652
+
653
+ typedef ompt_record_abstract_t *
654
+ (*ompt_get_record_abstract_t) (
655
+ void *native_record
656
+ );
657
+
658
+ typedef void (*ompt_callback_thread_begin_t) (
659
+ ompt_thread_t thread_type,
660
+ ompt_data_t *thread_data
661
+ );
662
+
663
+ typedef struct ompt_record_thread_begin_t {
664
+ ompt_thread_t thread_type;
665
+ } ompt_record_thread_begin_t;
666
+
667
+ typedef void (*ompt_callback_thread_end_t) (
668
+ ompt_data_t *thread_data
669
+ );
670
+
671
+ typedef void (*ompt_callback_parallel_begin_t) (
672
+ ompt_data_t *encountering_task_data,
673
+ const ompt_frame_t *encountering_task_frame,
674
+ ompt_data_t *parallel_data,
675
+ unsigned int requested_parallelism,
676
+ int flags,
677
+ const void *codeptr_ra
678
+ );
679
+
680
+ typedef struct ompt_record_parallel_begin_t {
681
+ ompt_id_t encountering_task_id;
682
+ ompt_id_t parallel_id;
683
+ unsigned int requested_parallelism;
684
+ int flags;
685
+ const void *codeptr_ra;
686
+ } ompt_record_parallel_begin_t;
687
+
688
+ typedef void (*ompt_callback_parallel_end_t) (
689
+ ompt_data_t *parallel_data,
690
+ ompt_data_t *encountering_task_data,
691
+ int flags,
692
+ const void *codeptr_ra
693
+ );
694
+
695
+ typedef struct ompt_record_parallel_end_t {
696
+ ompt_id_t parallel_id;
697
+ ompt_id_t encountering_task_id;
698
+ int flags;
699
+ const void *codeptr_ra;
700
+ } ompt_record_parallel_end_t;
701
+
702
+ typedef void (*ompt_callback_work_t) (
703
+ ompt_work_t wstype,
704
+ ompt_scope_endpoint_t endpoint,
705
+ ompt_data_t *parallel_data,
706
+ ompt_data_t *task_data,
707
+ uint64_t count,
708
+ const void *codeptr_ra
709
+ );
710
+
711
+ typedef struct ompt_record_work_t {
712
+ ompt_work_t wstype;
713
+ ompt_scope_endpoint_t endpoint;
714
+ ompt_id_t parallel_id;
715
+ ompt_id_t task_id;
716
+ uint64_t count;
717
+ const void *codeptr_ra;
718
+ } ompt_record_work_t;
719
+
720
+ typedef void (*ompt_callback_dispatch_t) (
721
+ ompt_data_t *parallel_data,
722
+ ompt_data_t *task_data,
723
+ ompt_dispatch_t kind,
724
+ ompt_data_t instance
725
+ );
726
+
727
+ typedef struct ompt_record_dispatch_t {
728
+ ompt_id_t parallel_id;
729
+ ompt_id_t task_id;
730
+ ompt_dispatch_t kind;
731
+ ompt_data_t instance;
732
+ } ompt_record_dispatch_t;
733
+
734
+ typedef void (*ompt_callback_task_create_t) (
735
+ ompt_data_t *encountering_task_data,
736
+ const ompt_frame_t *encountering_task_frame,
737
+ ompt_data_t *new_task_data,
738
+ int flags,
739
+ int has_dependences,
740
+ const void *codeptr_ra
741
+ );
742
+
743
+ typedef struct ompt_record_task_create_t {
744
+ ompt_id_t encountering_task_id;
745
+ ompt_id_t new_task_id;
746
+ int flags;
747
+ int has_dependences;
748
+ const void *codeptr_ra;
749
+ } ompt_record_task_create_t;
750
+
751
+ typedef void (*ompt_callback_dependences_t) (
752
+ ompt_data_t *task_data,
753
+ const ompt_dependence_t *deps,
754
+ int ndeps
755
+ );
756
+
757
+ typedef struct ompt_record_dependences_t {
758
+ ompt_id_t task_id;
759
+ ompt_dependence_t dep;
760
+ int ndeps;
761
+ } ompt_record_dependences_t;
762
+
763
+ typedef void (*ompt_callback_task_dependence_t) (
764
+ ompt_data_t *src_task_data,
765
+ ompt_data_t *sink_task_data
766
+ );
767
+
768
+ typedef struct ompt_record_task_dependence_t {
769
+ ompt_id_t src_task_id;
770
+ ompt_id_t sink_task_id;
771
+ } ompt_record_task_dependence_t;
772
+
773
+ typedef void (*ompt_callback_task_schedule_t) (
774
+ ompt_data_t *prior_task_data,
775
+ ompt_task_status_t prior_task_status,
776
+ ompt_data_t *next_task_data
777
+ );
778
+
779
+ typedef struct ompt_record_task_schedule_t {
780
+ ompt_id_t prior_task_id;
781
+ ompt_task_status_t prior_task_status;
782
+ ompt_id_t next_task_id;
783
+ } ompt_record_task_schedule_t;
784
+
785
+ typedef void (*ompt_callback_implicit_task_t) (
786
+ ompt_scope_endpoint_t endpoint,
787
+ ompt_data_t *parallel_data,
788
+ ompt_data_t *task_data,
789
+ unsigned int actual_parallelism,
790
+ unsigned int index,
791
+ int flags
792
+ );
793
+
794
+ typedef struct ompt_record_implicit_task_t {
795
+ ompt_scope_endpoint_t endpoint;
796
+ ompt_id_t parallel_id;
797
+ ompt_id_t task_id;
798
+ unsigned int actual_parallelism;
799
+ unsigned int index;
800
+ int flags;
801
+ } ompt_record_implicit_task_t;
802
+
803
+ typedef void (*ompt_callback_master_t) (
804
+ ompt_scope_endpoint_t endpoint,
805
+ ompt_data_t *parallel_data,
806
+ ompt_data_t *task_data,
807
+ const void *codeptr_ra
808
+ );
809
+
810
+ typedef struct ompt_record_master_t {
811
+ ompt_scope_endpoint_t endpoint;
812
+ ompt_id_t parallel_id;
813
+ ompt_id_t task_id;
814
+ const void *codeptr_ra;
815
+ } ompt_record_master_t;
816
+
817
+ typedef void (*ompt_callback_sync_region_t) (
818
+ ompt_sync_region_t kind,
819
+ ompt_scope_endpoint_t endpoint,
820
+ ompt_data_t *parallel_data,
821
+ ompt_data_t *task_data,
822
+ const void *codeptr_ra
823
+ );
824
+
825
+ typedef struct ompt_record_sync_region_t {
826
+ ompt_sync_region_t kind;
827
+ ompt_scope_endpoint_t endpoint;
828
+ ompt_id_t parallel_id;
829
+ ompt_id_t task_id;
830
+ const void *codeptr_ra;
831
+ } ompt_record_sync_region_t;
832
+
833
+ typedef void (*ompt_callback_mutex_acquire_t) (
834
+ ompt_mutex_t kind,
835
+ unsigned int hint,
836
+ unsigned int impl,
837
+ ompt_wait_id_t wait_id,
838
+ const void *codeptr_ra
839
+ );
840
+
841
+ typedef struct ompt_record_mutex_acquire_t {
842
+ ompt_mutex_t kind;
843
+ unsigned int hint;
844
+ unsigned int impl;
845
+ ompt_wait_id_t wait_id;
846
+ const void *codeptr_ra;
847
+ } ompt_record_mutex_acquire_t;
848
+
849
+ typedef void (*ompt_callback_mutex_t) (
850
+ ompt_mutex_t kind,
851
+ ompt_wait_id_t wait_id,
852
+ const void *codeptr_ra
853
+ );
854
+
855
+ typedef struct ompt_record_mutex_t {
856
+ ompt_mutex_t kind;
857
+ ompt_wait_id_t wait_id;
858
+ const void *codeptr_ra;
859
+ } ompt_record_mutex_t;
860
+
861
+ typedef void (*ompt_callback_nest_lock_t) (
862
+ ompt_scope_endpoint_t endpoint,
863
+ ompt_wait_id_t wait_id,
864
+ const void *codeptr_ra
865
+ );
866
+
867
+ typedef struct ompt_record_nest_lock_t {
868
+ ompt_scope_endpoint_t endpoint;
869
+ ompt_wait_id_t wait_id;
870
+ const void *codeptr_ra;
871
+ } ompt_record_nest_lock_t;
872
+
873
+ typedef void (*ompt_callback_flush_t) (
874
+ ompt_data_t *thread_data,
875
+ const void *codeptr_ra
876
+ );
877
+
878
+ typedef struct ompt_record_flush_t {
879
+ const void *codeptr_ra;
880
+ } ompt_record_flush_t;
881
+
882
+ typedef void (*ompt_callback_cancel_t) (
883
+ ompt_data_t *task_data,
884
+ int flags,
885
+ const void *codeptr_ra
886
+ );
887
+
888
+ typedef struct ompt_record_cancel_t {
889
+ ompt_id_t task_id;
890
+ int flags;
891
+ const void *codeptr_ra;
892
+ } ompt_record_cancel_t;
893
+
894
+ typedef void (*ompt_callback_device_initialize_t) (
895
+ int device_num,
896
+ const char *type,
897
+ ompt_device_t *device,
898
+ ompt_function_lookup_t lookup,
899
+ const char *documentation
900
+ );
901
+
902
+ typedef void (*ompt_callback_device_finalize_t) (
903
+ int device_num
904
+ );
905
+
906
+ typedef void (*ompt_callback_device_load_t) (
907
+ int device_num,
908
+ const char *filename,
909
+ int64_t offset_in_file,
910
+ void *vma_in_file,
911
+ size_t bytes,
912
+ void *host_addr,
913
+ void *device_addr,
914
+ uint64_t module_id
915
+ );
916
+
917
+ typedef void (*ompt_callback_device_unload_t) (
918
+ int device_num,
919
+ uint64_t module_id
920
+ );
921
+
922
+ typedef void (*ompt_callback_target_data_op_t) (
923
+ ompt_id_t target_id,
924
+ ompt_id_t host_op_id,
925
+ ompt_target_data_op_t optype,
926
+ void *src_addr,
927
+ int src_device_num,
928
+ void *dest_addr,
929
+ int dest_device_num,
930
+ size_t bytes,
931
+ const void *codeptr_ra
932
+ );
933
+
934
+ typedef struct ompt_record_target_data_op_t {
935
+ ompt_id_t host_op_id;
936
+ ompt_target_data_op_t optype;
937
+ void *src_addr;
938
+ int src_device_num;
939
+ void *dest_addr;
940
+ int dest_device_num;
941
+ size_t bytes;
942
+ ompt_device_time_t end_time;
943
+ const void *codeptr_ra;
944
+ } ompt_record_target_data_op_t;
945
+
946
+ typedef void (*ompt_callback_target_t) (
947
+ ompt_target_t kind,
948
+ ompt_scope_endpoint_t endpoint,
949
+ int device_num,
950
+ ompt_data_t *task_data,
951
+ ompt_id_t target_id,
952
+ const void *codeptr_ra
953
+ );
954
+
955
+ typedef struct ompt_record_target_t {
956
+ ompt_target_t kind;
957
+ ompt_scope_endpoint_t endpoint;
958
+ int device_num;
959
+ ompt_id_t task_id;
960
+ ompt_id_t target_id;
961
+ const void *codeptr_ra;
962
+ } ompt_record_target_t;
963
+
964
+ typedef void (*ompt_callback_target_map_t) (
965
+ ompt_id_t target_id,
966
+ unsigned int nitems,
967
+ void **host_addr,
968
+ void **device_addr,
969
+ size_t *bytes,
970
+ unsigned int *mapping_flags,
971
+ const void *codeptr_ra
972
+ );
973
+
974
+ typedef struct ompt_record_target_map_t {
975
+ ompt_id_t target_id;
976
+ unsigned int nitems;
977
+ void **host_addr;
978
+ void **device_addr;
979
+ size_t *bytes;
980
+ unsigned int *mapping_flags;
981
+ const void *codeptr_ra;
982
+ } ompt_record_target_map_t;
983
+
984
+ typedef void (*ompt_callback_target_submit_t) (
985
+ ompt_id_t target_id,
986
+ ompt_id_t host_op_id,
987
+ unsigned int requested_num_teams
988
+ );
989
+
990
+ typedef struct ompt_record_target_kernel_t {
991
+ ompt_id_t host_op_id;
992
+ unsigned int requested_num_teams;
993
+ unsigned int granted_num_teams;
994
+ ompt_device_time_t end_time;
995
+ } ompt_record_target_kernel_t;
996
+
997
+ typedef int (*ompt_callback_control_tool_t) (
998
+ uint64_t command,
999
+ uint64_t modifier,
1000
+ void *arg,
1001
+ const void *codeptr_ra
1002
+ );
1003
+
1004
+ typedef struct ompt_record_control_tool_t {
1005
+ uint64_t command;
1006
+ uint64_t modifier;
1007
+ const void *codeptr_ra;
1008
+ } ompt_record_control_tool_t;
1009
+
1010
+ typedef struct ompd_address_t {
1011
+ ompd_seg_t segment;
1012
+ ompd_addr_t address;
1013
+ } ompd_address_t;
1014
+
1015
+ typedef struct ompd_frame_info_t {
1016
+ ompd_address_t frame_address;
1017
+ ompd_word_t frame_flag;
1018
+ } ompd_frame_info_t;
1019
+
1020
+ typedef struct _ompd_aspace_handle ompd_address_space_handle_t;
1021
+ typedef struct _ompd_thread_handle ompd_thread_handle_t;
1022
+ typedef struct _ompd_parallel_handle ompd_parallel_handle_t;
1023
+ typedef struct _ompd_task_handle ompd_task_handle_t;
1024
+
1025
+ typedef struct _ompd_aspace_cont ompd_address_space_context_t;
1026
+ typedef struct _ompd_thread_cont ompd_thread_context_t;
1027
+
1028
+ typedef struct ompd_device_type_sizes_t {
1029
+ uint8_t sizeof_char;
1030
+ uint8_t sizeof_short;
1031
+ uint8_t sizeof_int;
1032
+ uint8_t sizeof_long;
1033
+ uint8_t sizeof_long_long;
1034
+ uint8_t sizeof_pointer;
1035
+ } ompd_device_type_sizes_t;
1036
+
1037
+ typedef struct ompt_record_ompt_t {
1038
+ ompt_callbacks_t type;
1039
+ ompt_device_time_t time;
1040
+ ompt_id_t thread_id;
1041
+ ompt_id_t target_id;
1042
+ union {
1043
+ ompt_record_thread_begin_t thread_begin;
1044
+ ompt_record_parallel_begin_t parallel_begin;
1045
+ ompt_record_parallel_end_t parallel_end;
1046
+ ompt_record_work_t work;
1047
+ ompt_record_dispatch_t dispatch;
1048
+ ompt_record_task_create_t task_create;
1049
+ ompt_record_dependences_t dependences;
1050
+ ompt_record_task_dependence_t task_dependence;
1051
+ ompt_record_task_schedule_t task_schedule;
1052
+ ompt_record_implicit_task_t implicit_task;
1053
+ ompt_record_master_t master;
1054
+ ompt_record_sync_region_t sync_region;
1055
+ ompt_record_mutex_acquire_t mutex_acquire;
1056
+ ompt_record_mutex_t mutex;
1057
+ ompt_record_nest_lock_t nest_lock;
1058
+ ompt_record_flush_t flush;
1059
+ ompt_record_cancel_t cancel;
1060
+ ompt_record_target_t target;
1061
+ ompt_record_target_data_op_t target_data_op;
1062
+ ompt_record_target_map_t target_map;
1063
+ ompt_record_target_kernel_t target_kernel;
1064
+ ompt_record_control_tool_t control_tool;
1065
+ } record;
1066
+ } ompt_record_ompt_t;
1067
+
1068
+ typedef ompt_record_ompt_t *(*ompt_get_record_ompt_t) (
1069
+ ompt_buffer_t *buffer,
1070
+ ompt_buffer_cursor_t current
1071
+ );
1072
+
1073
+ #define ompt_id_none 0
1074
+ #define ompt_data_none {0}
1075
+ #define ompt_time_none 0
1076
+ #define ompt_hwid_none 0
1077
+ #define ompt_addr_none ~0
1078
+ #define ompt_mutex_impl_none 0
1079
+ #define ompt_wait_id_none 0
1080
+
1081
+ #define ompd_segment_none 0
1082
+
1083
+ #endif /* __OMPT__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1626ff119582bca46605bc6d49769ab75314b9993dd647bd64a90dec747bc843
3
+ size 1534104
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abc63100e9cf516b8ed1fa25354ae53dbfe8df4838ac525d8d738332c2198dc2
3
+ size 7419504
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa2587c8d211fbc85e8b88cca0bcebe78c8cc40c81b0c3763ce57ac9e63f0669
3
+ size 5895416
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/nvrtc.h ADDED
@@ -0,0 +1,845 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // NVIDIA_COPYRIGHT_BEGIN
3
+ //
4
+ // Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
5
+ //
6
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ // and proprietary rights in and to this software, related documentation
8
+ // and any modifications thereto. Any use, reproduction, disclosure or
9
+ // distribution of this software and related documentation without an express
10
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ //
12
+ // NVIDIA_COPYRIGHT_END
13
+ //
14
+
15
+ #ifndef __NVRTC_H__
16
+ #define __NVRTC_H__
17
+
18
+ #ifdef __cplusplus
19
+ extern "C" {
20
+ #endif /* __cplusplus */
21
+
22
+ #include <stdlib.h>
23
+
24
+
25
+ /*************************************************************************//**
26
+ *
27
+ * \defgroup error Error Handling
28
+ *
29
+ * NVRTC defines the following enumeration type and function for API call
30
+ * error handling.
31
+ *
32
+ ****************************************************************************/
33
+
34
+
35
+ /**
36
+ * \ingroup error
37
+ * \brief The enumerated type nvrtcResult defines API call result codes.
38
+ * NVRTC API functions return nvrtcResult to indicate the call
39
+ * result.
40
+ */
41
+ typedef enum {
42
+ NVRTC_SUCCESS = 0,
43
+ NVRTC_ERROR_OUT_OF_MEMORY = 1,
44
+ NVRTC_ERROR_PROGRAM_CREATION_FAILURE = 2,
45
+ NVRTC_ERROR_INVALID_INPUT = 3,
46
+ NVRTC_ERROR_INVALID_PROGRAM = 4,
47
+ NVRTC_ERROR_INVALID_OPTION = 5,
48
+ NVRTC_ERROR_COMPILATION = 6,
49
+ NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = 7,
50
+ NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = 8,
51
+ NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = 9,
52
+ NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = 10,
53
+ NVRTC_ERROR_INTERNAL_ERROR = 11,
54
+ NVRTC_ERROR_TIME_FILE_WRITE_FAILED = 12
55
+ } nvrtcResult;
56
+
57
+
58
+ /**
59
+ * \ingroup error
60
+ * \brief nvrtcGetErrorString is a helper function that returns a string
61
+ * describing the given nvrtcResult code, e.g., NVRTC_SUCCESS to
62
+ * \c "NVRTC_SUCCESS".
63
+ * For unrecognized enumeration values, it returns
64
+ * \c "NVRTC_ERROR unknown".
65
+ *
66
+ * \param [in] result CUDA Runtime Compilation API result code.
67
+ * \return Message string for the given #nvrtcResult code.
68
+ */
69
+ const char *nvrtcGetErrorString(nvrtcResult result);
70
+
71
+
72
+ /*************************************************************************//**
73
+ *
74
+ * \defgroup query General Information Query
75
+ *
76
+ * NVRTC defines the following function for general information query.
77
+ *
78
+ ****************************************************************************/
79
+
80
+
81
+ /**
82
+ * \ingroup query
83
+ * \brief nvrtcVersion sets the output parameters \p major and \p minor
84
+ * with the CUDA Runtime Compilation version number.
85
+ *
86
+ * \param [out] major CUDA Runtime Compilation major version number.
87
+ * \param [out] minor CUDA Runtime Compilation minor version number.
88
+ * \return
89
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
90
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
91
+ *
92
+ */
93
+ nvrtcResult nvrtcVersion(int *major, int *minor);
94
+
95
+
96
+ /**
97
+ * \ingroup query
98
+ * \brief nvrtcGetNumSupportedArchs sets the output parameter \p numArchs
99
+ * with the number of architectures supported by NVRTC. This can
100
+ * then be used to pass an array to ::nvrtcGetSupportedArchs to
101
+ * get the supported architectures.
102
+ *
103
+ * \param [out] numArchs number of supported architectures.
104
+ * \return
105
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
106
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
107
+ *
108
+ * see ::nvrtcGetSupportedArchs
109
+ */
110
+ nvrtcResult nvrtcGetNumSupportedArchs(int* numArchs);
111
+
112
+
113
+ /**
114
+ * \ingroup query
115
+ * \brief nvrtcGetSupportedArchs populates the array passed via the output parameter
116
+ * \p supportedArchs with the architectures supported by NVRTC. The array is
117
+ * sorted in the ascending order. The size of the array to be passed can be
118
+ * determined using ::nvrtcGetNumSupportedArchs.
119
+ *
120
+ * \param [out] supportedArchs sorted array of supported architectures.
121
+ * \return
122
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
123
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
124
+ *
125
+ * see ::nvrtcGetNumSupportedArchs
126
+ */
127
+ nvrtcResult nvrtcGetSupportedArchs(int* supportedArchs);
128
+
129
+
130
+ /*************************************************************************//**
131
+ *
132
+ * \defgroup compilation Compilation
133
+ *
134
+ * NVRTC defines the following type and functions for actual compilation.
135
+ *
136
+ ****************************************************************************/
137
+
138
+
139
+ /**
140
+ * \ingroup compilation
141
+ * \brief nvrtcProgram is the unit of compilation, and an opaque handle for
142
+ * a program.
143
+ *
144
+ * To compile a CUDA program string, an instance of nvrtcProgram must be
145
+ * created first with ::nvrtcCreateProgram, then compiled with
146
+ * ::nvrtcCompileProgram.
147
+ */
148
+ typedef struct _nvrtcProgram *nvrtcProgram;
149
+
150
+
151
+ /**
152
+ * \ingroup compilation
153
+ * \brief nvrtcCreateProgram creates an instance of nvrtcProgram with the
154
+ * given input parameters, and sets the output parameter \p prog with
155
+ * it.
156
+ *
157
+ * \param [out] prog CUDA Runtime Compilation program.
158
+ * \param [in] src CUDA program source.
159
+ * \param [in] name CUDA program name.\n
160
+ * \p name can be \c NULL; \c "default_program" is
161
+ * used when \p name is \c NULL or "".
162
+ * \param [in] numHeaders Number of headers used.\n
163
+ * \p numHeaders must be greater than or equal to 0.
164
+ * \param [in] headers Sources of the headers.\n
165
+ * \p headers can be \c NULL when \p numHeaders is
166
+ * 0.
167
+ * \param [in] includeNames Name of each header by which they can be
168
+ * included in the CUDA program source.\n
169
+ * \p includeNames can be \c NULL when \p numHeaders
170
+ * is 0. These headers must be included with the exact
171
+ * names specified here.
172
+ * \return
173
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
174
+ * - \link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \endlink
175
+ * - \link #nvrtcResult NVRTC_ERROR_PROGRAM_CREATION_FAILURE \endlink
176
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
177
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
178
+ *
179
+ * \see ::nvrtcDestroyProgram
180
+ */
181
+ nvrtcResult nvrtcCreateProgram(nvrtcProgram *prog,
182
+ const char *src,
183
+ const char *name,
184
+ int numHeaders,
185
+ const char * const *headers,
186
+ const char * const *includeNames);
187
+
188
+
189
+ /**
190
+ * \ingroup compilation
191
+ * \brief nvrtcDestroyProgram destroys the given program.
192
+ *
193
+ * \param [in] prog CUDA Runtime Compilation program.
194
+ * \return
195
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
196
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
197
+ *
198
+ * \see ::nvrtcCreateProgram
199
+ */
200
+ nvrtcResult nvrtcDestroyProgram(nvrtcProgram *prog);
201
+
202
+
203
+ /**
204
+ * \ingroup compilation
205
+ * \brief nvrtcCompileProgram compiles the given program.
206
+ *
207
+ * \param [in] prog CUDA Runtime Compilation program.
208
+ * \param [in] numOptions Number of compiler options passed.
209
+ * \param [in] options Compiler options in the form of C string array.\n
210
+ * \p options can be \c NULL when \p numOptions is 0.
211
+ *
212
+ * \return
213
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
214
+ * - \link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \endlink
215
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
216
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
217
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_OPTION \endlink
218
+ * - \link #nvrtcResult NVRTC_ERROR_COMPILATION \endlink
219
+ * - \link #nvrtcResult NVRTC_ERROR_BUILTIN_OPERATION_FAILURE \endlink
220
+ * - \link #nvrtcResult NVRTC_ERROR_TIME_FILE_WRITE_FAILED \endlink
221
+ *
222
+ * It supports compile options listed in \ref options.
223
+ */
224
+ nvrtcResult nvrtcCompileProgram(nvrtcProgram prog,
225
+ int numOptions, const char * const *options);
226
+
227
+
228
+ /**
229
+ * \ingroup compilation
230
+ * \brief nvrtcGetPTXSize sets the value of \p ptxSizeRet with the size of the PTX
231
+ * generated by the previous compilation of \p prog (including the
232
+ * trailing \c NULL).
233
+ *
234
+ * \param [in] prog CUDA Runtime Compilation program.
235
+ * \param [out] ptxSizeRet Size of the generated PTX (including the trailing
236
+ * \c NULL).
237
+ * \return
238
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
239
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
240
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
241
+ *
242
+ * \see ::nvrtcGetPTX
243
+ */
244
+ nvrtcResult nvrtcGetPTXSize(nvrtcProgram prog, size_t *ptxSizeRet);
245
+
246
+
247
+ /**
248
+ * \ingroup compilation
249
+ * \brief nvrtcGetPTX stores the PTX generated by the previous compilation
250
+ * of \p prog in the memory pointed by \p ptx.
251
+ *
252
+ * \param [in] prog CUDA Runtime Compilation program.
253
+ * \param [out] ptx Compiled result.
254
+ * \return
255
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
256
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
257
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
258
+ *
259
+ * \see ::nvrtcGetPTXSize
260
+ */
261
+ nvrtcResult nvrtcGetPTX(nvrtcProgram prog, char *ptx);
262
+
263
+
264
+ /**
265
+ * \ingroup compilation
266
+ * \brief nvrtcGetCUBINSize sets the value of \p cubinSizeRet with the size of the cubin
267
+ * generated by the previous compilation of \p prog. The value of
268
+ * cubinSizeRet is set to 0 if the value specified to \c -arch is a
269
+ * virtual architecture instead of an actual architecture.
270
+ *
271
+ * \param [in] prog CUDA Runtime Compilation program.
272
+ * \param [out] cubinSizeRet Size of the generated cubin.
273
+ * \return
274
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
275
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
276
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
277
+ *
278
+ * \see ::nvrtcGetCUBIN
279
+ */
280
+ nvrtcResult nvrtcGetCUBINSize(nvrtcProgram prog, size_t *cubinSizeRet);
281
+
282
+
283
+ /**
284
+ * \ingroup compilation
285
+ * \brief nvrtcGetCUBIN stores the cubin generated by the previous compilation
286
+ * of \p prog in the memory pointed by \p cubin. No cubin is available
287
+ * if the value specified to \c -arch is a virtual architecture instead
288
+ * of an actual architecture.
289
+ *
290
+ * \param [in] prog CUDA Runtime Compilation program.
291
+ * \param [out] cubin Compiled and assembled result.
292
+ * \return
293
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
294
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
295
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
296
+ *
297
+ * \see ::nvrtcGetCUBINSize
298
+ */
299
+ nvrtcResult nvrtcGetCUBIN(nvrtcProgram prog, char *cubin);
300
+
301
+
302
+ #if defined(_WIN32)
303
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
304
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
305
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
306
+ #elif (defined(__GNUC__))
307
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
308
+ #else
309
+ # define __DEPRECATED__(msg)
310
+ #endif
311
+
312
+ /**
313
+ * \ingroup compilation
314
+ * \brief
315
+ * DEPRECATION NOTICE: This function will be removed in a future release. Please use
316
+ * nvrtcGetLTOIRSize (and nvrtcGetLTOIR) instead.
317
+ */
318
+ __DEPRECATED__("This function will be removed in a future release. Please use nvrtcGetLTOIRSize instead")
319
+ nvrtcResult nvrtcGetNVVMSize(nvrtcProgram prog, size_t *nvvmSizeRet);
320
+
321
+ /**
322
+ * \ingroup compilation
323
+ * \brief
324
+ * DEPRECATION NOTICE: This function will be removed in a future release. Please use
325
+ * nvrtcGetLTOIR (and nvrtcGetLTOIRSize) instead.
326
+ */
327
+ __DEPRECATED__("This function will be removed in a future release. Please use nvrtcGetLTOIR instead")
328
+ nvrtcResult nvrtcGetNVVM(nvrtcProgram prog, char *nvvm);
329
+
330
+ #undef __DEPRECATED__
331
+
332
+ /**
333
+ * \ingroup compilation
334
+ * \brief nvrtcGetLTOIRSize sets the value of \p LTOIRSizeRet with the size of the LTO IR
335
+ * generated by the previous compilation of \p prog. The value of
336
+ * LTOIRSizeRet is set to 0 if the program was not compiled with
337
+ * \c -dlto.
338
+ *
339
+ * \param [in] prog CUDA Runtime Compilation program.
340
+ * \param [out] LTOIRSizeRet Size of the generated LTO IR.
341
+ * \return
342
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
343
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
344
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
345
+ *
346
+ * \see ::nvrtcGetLTOIR
347
+ */
348
+ nvrtcResult nvrtcGetLTOIRSize(nvrtcProgram prog, size_t *LTOIRSizeRet);
349
+
350
+
351
+ /**
352
+ * \ingroup compilation
353
+ * \brief nvrtcGetLTOIR stores the LTO IR generated by the previous compilation
354
+ * of \p prog in the memory pointed by \p LTOIR. No LTO IR is available
355
+ * if the program was compiled without \c -dlto.
356
+ *
357
+ * \param [in] prog CUDA Runtime Compilation program.
358
+ * \param [out] LTOIR Compiled result.
359
+ * \return
360
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
361
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
362
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
363
+ *
364
+ * \see ::nvrtcGetLTOIRSize
365
+ */
366
+ nvrtcResult nvrtcGetLTOIR(nvrtcProgram prog, char *LTOIR);
367
+
368
+
369
+ /**
370
+ * \ingroup compilation
371
+ * \brief nvrtcGetOptiXIRSize sets the value of \p optixirSizeRet with the size of the OptiX IR
372
+ * generated by the previous compilation of \p prog. The value of
373
+ * nvrtcGetOptiXIRSize is set to 0 if the program was compiled with
374
+ * options incompatible with OptiX IR generation.
375
+ *
376
+ * \param [in] prog CUDA Runtime Compilation program.
377
+ * \param [out] optixirSizeRet Size of the generated LTO IR.
378
+ * \return
379
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
380
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
381
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
382
+ *
383
+ * \see ::nvrtcGetOptiXIR
384
+ */
385
+ nvrtcResult nvrtcGetOptiXIRSize(nvrtcProgram prog, size_t *optixirSizeRet);
386
+
387
+
388
+ /**
389
+ * \ingroup compilation
390
+ * \brief nvrtcGetOptiXIR stores the OptiX IR generated by the previous compilation
391
+ * of \p prog in the memory pointed by \p optixir. No OptiX IR is available
392
+ * if the program was compiled with options incompatible with OptiX IR generation.
393
+ *
394
+ * \param [in] prog CUDA Runtime Compilation program.
395
+ * \param [out] Optix IR Compiled result.
396
+ * \return
397
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
398
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
399
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
400
+ *
401
+ * \see ::nvrtcGetOptiXIRSize
402
+ */
403
+ nvrtcResult nvrtcGetOptiXIR(nvrtcProgram prog, char *optixir);
404
+
405
+ /**
406
+ * \ingroup compilation
407
+ * \brief nvrtcGetProgramLogSize sets \p logSizeRet with the size of the
408
+ * log generated by the previous compilation of \p prog (including the
409
+ * trailing \c NULL).
410
+ *
411
+ * Note that compilation log may be generated with warnings and informative
412
+ * messages, even when the compilation of \p prog succeeds.
413
+ *
414
+ * \param [in] prog CUDA Runtime Compilation program.
415
+ * \param [out] logSizeRet Size of the compilation log
416
+ * (including the trailing \c NULL).
417
+ * \return
418
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
419
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
420
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
421
+ *
422
+ * \see ::nvrtcGetProgramLog
423
+ */
424
+ nvrtcResult nvrtcGetProgramLogSize(nvrtcProgram prog, size_t *logSizeRet);
425
+
426
+
427
+ /**
428
+ * \ingroup compilation
429
+ * \brief nvrtcGetProgramLog stores the log generated by the previous
430
+ * compilation of \p prog in the memory pointed by \p log.
431
+ *
432
+ * \param [in] prog CUDA Runtime Compilation program.
433
+ * \param [out] log Compilation log.
434
+ * \return
435
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
436
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
437
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
438
+ *
439
+ * \see ::nvrtcGetProgramLogSize
440
+ */
441
+ nvrtcResult nvrtcGetProgramLog(nvrtcProgram prog, char *log);
442
+
443
+
444
+ /**
445
+ * \ingroup compilation
446
+ * \brief nvrtcAddNameExpression notes the given name expression
447
+ * denoting the address of a __global__ function
448
+ * or __device__/__constant__ variable.
449
+ *
450
+ * The identical name expression string must be provided on a subsequent
451
+ * call to nvrtcGetLoweredName to extract the lowered name.
452
+ * \param [in] prog CUDA Runtime Compilation program.
453
+ * \param [in] name_expression constant expression denoting the address of
454
+ * a __global__ function or __device__/__constant__ variable.
455
+ * \return
456
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
457
+ * - \link #nvrtcResult NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION \endlink
458
+ *
459
+ * \see ::nvrtcGetLoweredName
460
+ */
461
+ nvrtcResult nvrtcAddNameExpression(nvrtcProgram prog,
462
+ const char * const name_expression);
463
+
464
+ /**
465
+ * \ingroup compilation
466
+ * \brief nvrtcGetLoweredName extracts the lowered (mangled) name
467
+ * for a __global__ function or __device__/__constant__ variable,
468
+ * and updates *lowered_name to point to it. The memory containing
469
+ * the name is released when the NVRTC program is destroyed by
470
+ * nvrtcDestroyProgram.
471
+ * The identical name expression must have been previously
472
+ * provided to nvrtcAddNameExpression.
473
+ *
474
+ * \param [in] prog CUDA Runtime Compilation program.
475
+ * \param [in] name_expression constant expression denoting the address of
476
+ * a __global__ function or __device__/__constant__ variable.
477
+ * \param [out] lowered_name initialized by the function to point to a
478
+ * C string containing the lowered (mangled)
479
+ * name corresponding to the provided name expression.
480
+ * \return
481
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
482
+ * - \link #nvrtcResult NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION \endlink
483
+ * - \link #nvrtcResult NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID \endlink
484
+ *
485
+ * \see ::nvrtcAddNameExpression
486
+ */
487
+ nvrtcResult nvrtcGetLoweredName(nvrtcProgram prog,
488
+ const char *const name_expression,
489
+ const char** lowered_name);
490
+
491
+
492
+ /**
493
+ * \defgroup options Supported Compile Options
494
+ *
495
+ * NVRTC supports the compile options below.
496
+ * Option names with two preceding dashs (\c --) are long option names and
497
+ * option names with one preceding dash (\c -) are short option names.
498
+ * Short option names can be used instead of long option names.
499
+ * When a compile option takes an argument, an assignment operator (\c =)
500
+ * is used to separate the compile option argument from the compile option
501
+ * name, e.g., \c "--gpu-architecture=compute_60".
502
+ * Alternatively, the compile option name and the argument can be specified in
503
+ * separate strings without an assignment operator, .e.g,
504
+ * \c "--gpu-architecture" \c "compute_60".
505
+ * Single-character short option names, such as \c -D, \c -U, and \c -I, do
506
+ * not require an assignment operator, and the compile option name and the
507
+ * argument can be present in the same string with or without spaces between
508
+ * them.
509
+ * For instance, \c "-D=<def>", \c "-D<def>", and \c "-D <def>" are all
510
+ * supported.
511
+ *
512
+ * The valid compiler options are:
513
+ *
514
+ * - Compilation targets
515
+ * - \c --gpu-architecture=\<arch\> (\c -arch)\n
516
+ * Specify the name of the class of GPU architectures for which the
517
+ * input must be compiled.\n
518
+ * - Valid <c>\<arch\></c>s:
519
+ * - \c compute_50
520
+ * - \c compute_52
521
+ * - \c compute_53
522
+ * - \c compute_60
523
+ * - \c compute_61
524
+ * - \c compute_62
525
+ * - \c compute_70
526
+ * - \c compute_72
527
+ * - \c compute_75
528
+ * - \c compute_80
529
+ * - \c compute_87
530
+ * - \c compute_89
531
+ * - \c compute_90
532
+ * - \c compute_90a
533
+ * - \c sm_50
534
+ * - \c sm_52
535
+ * - \c sm_53
536
+ * - \c sm_60
537
+ * - \c sm_61
538
+ * - \c sm_62
539
+ * - \c sm_70
540
+ * - \c sm_72
541
+ * - \c sm_75
542
+ * - \c sm_80
543
+ * - \c sm_87
544
+ * - \c sm_89
545
+ * - \c sm_90
546
+ * - \c sm_90a
547
+ * - Default: \c compute_52
548
+ * - Separate compilation / whole-program compilation
549
+ * - \c --device-c (\c -dc)\n
550
+ * Generate relocatable code that can be linked with other relocatable
551
+ * device code. It is equivalent to --relocatable-device-code=true.
552
+ * - \c --device-w (\c -dw)\n
553
+ * Generate non-relocatable code. It is equivalent to
554
+ * \c --relocatable-device-code=false.
555
+ * - \c --relocatable-device-code={true|false} (\c -rdc)\n
556
+ * Enable (disable) the generation of relocatable device code.
557
+ * - Default: \c false
558
+ * - \c --extensible-whole-program (\c -ewp)\n
559
+ * Do extensible whole program compilation of device code.
560
+ * - Default: \c false
561
+ * - Debugging support
562
+ * - \c --device-debug (\c -G)\n
563
+ * Generate debug information. If --dopt is not specified,
564
+ * then turns off all optimizations.
565
+ * - \c --generate-line-info (\c -lineinfo)\n
566
+ * Generate line-number information.
567
+ * - Code generation
568
+ * - \c --dopt on (\c -dopt)\n
569
+ * - \c --dopt=on \n
570
+ * Enable device code optimization. When specified along with '-G', enables
571
+ * limited debug information generation for optimized device code (currently,
572
+ * only line number information).
573
+ * When '-G' is not specified, '-dopt=on' is implicit.
574
+ * - \c --ptxas-options \<options\> (\c -Xptxas)\n
575
+ * - \c --ptxas-options=\<options\> \n
576
+ * Specify options directly to ptxas, the PTX optimizing assembler.
577
+ * - \c --maxrregcount=\<N\> (\c -maxrregcount)\n
578
+ * Specify the maximum amount of registers that GPU functions can use.
579
+ * Until a function-specific limit, a higher value will generally
580
+ * increase the performance of individual GPU threads that execute this
581
+ * function. However, because thread registers are allocated from a
582
+ * global register pool on each GPU, a higher value of this option will
583
+ * also reduce the maximum thread block size, thereby reducing the amount
584
+ * of thread parallelism. Hence, a good maxrregcount value is the result
585
+ * of a trade-off. If this option is not specified, then no maximum is
586
+ * assumed. Value less than the minimum registers required by ABI will
587
+ * be bumped up by the compiler to ABI minimum limit.
588
+ * - \c --ftz={true|false} (\c -ftz)\n
589
+ * When performing single-precision floating-point operations, flush
590
+ * denormal values to zero or preserve denormal values.
591
+ * \c --use_fast_math implies \c --ftz=true.
592
+ * - Default: \c false
593
+ * - \c --prec-sqrt={true|false} (\c -prec-sqrt)\n
594
+ * For single-precision floating-point square root, use IEEE
595
+ * round-to-nearest mode or use a faster approximation.
596
+ * \c --use_fast_math implies \c --prec-sqrt=false.
597
+ * - Default: \c true
598
+ * - \c --prec-div={true|false} (\c -prec-div)\n
599
+ * For single-precision floating-point division and reciprocals, use IEEE
600
+ * round-to-nearest mode or use a faster approximation.
601
+ * \c --use_fast_math implies \c --prec-div=false.
602
+ * - Default: \c true
603
+ * - \c --fmad={true|false} (\c -fmad)\n
604
+ * Enables (disables) the contraction of floating-point multiplies and
605
+ * adds/subtracts into floating-point multiply-add operations (FMAD,
606
+ * FFMA, or DFMA). \c --use_fast_math implies \c --fmad=true.
607
+ * - Default: \c true
608
+ * - \c --use_fast_math (\c -use_fast_math)\n
609
+ * Make use of fast math operations.
610
+ * \c --use_fast_math implies \c --ftz=true \c --prec-div=false
611
+ * \c --prec-sqrt=false \c --fmad=true.
612
+ * - \c --extra-device-vectorization (\c -extra-device-vectorization)\n
613
+ * Enables more aggressive device code vectorization in the NVVM optimizer.
614
+ * - \c --modify-stack-limit={true|false} (\c -modify-stack-limit)\n
615
+ * On Linux, during compilation, use \c setrlimit() to increase stack size
616
+ * to maximum allowed. The limit is reset to the previous value at the
617
+ * end of compilation.
618
+ * Note: \c setrlimit() changes the value for the entire process.
619
+ * - Default: \c true
620
+ * - \c --dlink-time-opt (\c -dlto)\n
621
+ * Generate intermediate code for later link-time optimization.
622
+ * It implies \c -rdc=true.
623
+ * Note: when this option is used the nvrtcGetLTOIR API should be used,
624
+ * as PTX or Cubin will not be generated.
625
+ * - \c --gen-opt-lto (\c -gen-opt-lto)\n
626
+ * Run the optimizer passes before generating the LTO IR.
627
+ * - \c --optix-ir (\c -optix-ir)\n
628
+ * Generate OptiX IR. The Optix IR is only intended for consumption by OptiX
629
+ * through appropriate APIs. This feature is not supported with
630
+ * link-time-optimization (\c -dlto)\n.
631
+ * Note: when this option is used the nvrtcGetOptiX API should be used,
632
+ * as PTX or Cubin will not be generated.
633
+ * - Preprocessing
634
+ * - \c --define-macro=\<def\> (\c -D)\n
635
+ * \c \<def\> can be either \c \<name\> or \c \<name=definitions\>.
636
+ * - \c \<name\> \n
637
+ * Predefine \c \<name\> as a macro with definition \c 1.
638
+ * - \c \<name\>=\<definition\> \n
639
+ * The contents of \c \<definition\> are tokenized and preprocessed
640
+ * as if they appeared during translation phase three in a \c \#define
641
+ * directive. In particular, the definition will be truncated by
642
+ * embedded new line characters.
643
+ * - \c --undefine-macro=\<def\> (\c -U)\n
644
+ * Cancel any previous definition of \c \<def\>.
645
+ * - \c --include-path=\<dir\> (\c -I)\n
646
+ * Add the directory \c \<dir\> to the list of directories to be
647
+ * searched for headers. These paths are searched after the list of
648
+ * headers given to ::nvrtcCreateProgram.
649
+ * - \c --pre-include=\<header\> (\c -include)\n
650
+ * Preinclude \c \<header\> during preprocessing.
651
+ * - \c --no-source-include (\c -no-source-include)
652
+ * The preprocessor by default adds the directory of each input sources
653
+ * to the include path. This option disables this feature and only
654
+ * considers the path specified explicitly.
655
+ * - Language Dialect
656
+ * - \c --std={c++03|c++11|c++14|c++17|c++20}
657
+ * (\c -std={c++11|c++14|c++17|c++20})\n
658
+ * Set language dialect to C++03, C++11, C++14, C++17 or C++20
659
+ * - Default: \c c++17
660
+ * - \c --builtin-move-forward={true|false} (\c -builtin-move-forward)\n
661
+ * Provide builtin definitions of \c std::move and \c std::forward,
662
+ * when C++11 or later language dialect is selected.
663
+ * - Default: \c true
664
+ * - \c --builtin-initializer-list={true|false}
665
+ * (\c -builtin-initializer-list)\n
666
+ * Provide builtin definitions of \c std::initializer_list class and
667
+ * member functions when C++11 or later language dialect is selected.
668
+ * - Default: \c true
669
+ * - Misc.
670
+ * - \c --disable-warnings (\c -w)\n
671
+ * Inhibit all warning messages.
672
+ * - \c --restrict (\c -restrict)\n
673
+ * Programmer assertion that all kernel pointer parameters are restrict
674
+ * pointers.
675
+ * - \c --device-as-default-execution-space
676
+ * (\c -default-device)\n
677
+ * Treat entities with no execution space annotation as \c __device__
678
+ * entities.
679
+ * - \c --device-int128 (\c -device-int128)\n
680
+ * Allow the \c __int128 type in device code. Also causes the macro \c __CUDACC_RTC_INT128__
681
+ * to be defined.
682
+ * - \c --optimization-info=\<kind\> (\c -opt-info)\n
683
+ * Provide optimization reports for the specified kind of optimization.
684
+ * The following kind tags are supported:
685
+ * - \c inline : emit a remark when a function is inlined.
686
+ * - \c --version-ident={true|false} (\c -dQ)\n
687
+ * Embed used compiler's version info into generated PTX/CUBIN
688
+ * - Default: \c false
689
+ * - \c --display-error-number (\c -err-no)\n
690
+ * Display diagnostic number for warning messages. (Default)
691
+ * - \c --no-display-error-number (\c -no-err-no)\n
692
+ * Disables the display of a diagnostic number for warning messages.
693
+ * - \c --diag-error=<error-number>,... (\c -diag-error)\n
694
+ * Emit error for specified diagnostic message number(s). Message numbers can be separated by comma.
695
+ * - \c --diag-suppress=<error-number>,... (\c -diag-suppress)\n
696
+ * Suppress specified diagnostic message number(s). Message numbers can be separated by comma.
697
+ * - \c --diag-warn=<error-number>,... (\c -diag-warn)\n
698
+ * Emit warning for specified diagnostic message number(s). Message numbers can be separated by comma.
699
+ * - \c --brief-diagnostics={true|false} (\c -brief-diag)\n
700
+ * This option disables or enables showing source line and column info
701
+ * in a diagnostic.
702
+ * The --brief-diagnostics=true will not show the source line and column info.
703
+ * - Default: \c false
704
+ * - \c --time=<file-name> (\c -time)\n
705
+ * Generate a comma separated value table with the time taken by each compilation
706
+ * phase, and append it at the end of the file given as the option argument.
707
+ * If the file does not exist, the column headings are generated in the first row
708
+ * of the table. If the file name is '-', the timing data is written to the compilation log.
709
+ *
710
+ */
711
+
712
+
713
+ #ifdef __cplusplus
714
+ }
715
+ #endif /* __cplusplus */
716
+
717
+
718
+ /* The utility function 'nvrtcGetTypeName' is not available by default. Define
719
+ the macro 'NVRTC_GET_TYPE_NAME' to a non-zero value to make it available.
720
+ */
721
+
722
+ #if NVRTC_GET_TYPE_NAME || __DOXYGEN_ONLY__
723
+
724
+ #if NVRTC_USE_CXXABI || __clang__ || __GNUC__ || __DOXYGEN_ONLY__
725
+ #include <cxxabi.h>
726
+ #include <cstdlib>
727
+
728
+ #elif defined(_WIN32)
729
+ #include <Windows.h>
730
+ #include <DbgHelp.h>
731
+ #endif /* NVRTC_USE_CXXABI || __clang__ || __GNUC__ */
732
+
733
+
734
+ #include <string>
735
+ #include <typeinfo>
736
+
737
+ template <typename T> struct __nvrtcGetTypeName_helper_t { };
738
+
739
+ /*************************************************************************//**
740
+ *
741
+ * \defgroup hosthelper Host Helper
742
+ *
743
+ * NVRTC defines the following functions for easier interaction with host code.
744
+ *
745
+ ****************************************************************************/
746
+
747
+ /**
748
+ * \ingroup hosthelper
749
+ * \brief nvrtcGetTypeName stores the source level name of a type in the given
750
+ * std::string location.
751
+ *
752
+ * This function is only provided when the macro NVRTC_GET_TYPE_NAME is
753
+ * defined with a non-zero value. It uses abi::__cxa_demangle or UnDecorateSymbolName
754
+ * function calls to extract the type name, when using gcc/clang or cl.exe compilers,
755
+ * respectively. If the name extraction fails, it will return NVRTC_INTERNAL_ERROR,
756
+ * otherwise *result is initialized with the extracted name.
757
+ *
758
+ * Windows-specific notes:
759
+ * - nvrtcGetTypeName() is not multi-thread safe because it calls UnDecorateSymbolName(),
760
+ * which is not multi-thread safe.
761
+ * - The returned string may contain Microsoft-specific keywords such as __ptr64 and __cdecl.
762
+ *
763
+ * \param [in] tinfo: reference to object of type std::type_info for a given type.
764
+ * \param [in] result: pointer to std::string in which to store the type name.
765
+ * \return
766
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
767
+ * - \link #nvrtcResult NVRTC_ERROR_INTERNAL_ERROR \endlink
768
+ *
769
+ */
770
+ inline nvrtcResult nvrtcGetTypeName(const std::type_info &tinfo, std::string *result)
771
+ {
772
+ #if USE_CXXABI || __clang__ || __GNUC__
773
+ const char *name = tinfo.name();
774
+ int status;
775
+ char *undecorated_name = abi::__cxa_demangle(name, 0, 0, &status);
776
+ if (status == 0) {
777
+ *result = undecorated_name;
778
+ free(undecorated_name);
779
+ return NVRTC_SUCCESS;
780
+ }
781
+ #elif defined(_WIN32)
782
+ const char *name = tinfo.raw_name();
783
+ if (!name || *name != '.') {
784
+ return NVRTC_ERROR_INTERNAL_ERROR;
785
+ }
786
+ char undecorated_name[4096];
787
+ //name+1 skips over the '.' prefix
788
+ if(UnDecorateSymbolName(name+1, undecorated_name,
789
+ sizeof(undecorated_name) / sizeof(*undecorated_name),
790
+ //note: doesn't seem to work correctly without UNDNAME_NO_ARGUMENTS.
791
+ UNDNAME_NO_ARGUMENTS | UNDNAME_NAME_ONLY ) ) {
792
+ *result = undecorated_name;
793
+ return NVRTC_SUCCESS;
794
+ }
795
+ #endif /* USE_CXXABI || __clang__ || __GNUC__ */
796
+
797
+ return NVRTC_ERROR_INTERNAL_ERROR;
798
+ }
799
+
800
+ /**
801
+ * \ingroup hosthelper
802
+ * \brief nvrtcGetTypeName stores the source level name of the template type argument
803
+ * T in the given std::string location.
804
+ *
805
+ * This function is only provided when the macro NVRTC_GET_TYPE_NAME is
806
+ * defined with a non-zero value. It uses abi::__cxa_demangle or UnDecorateSymbolName
807
+ * function calls to extract the type name, when using gcc/clang or cl.exe compilers,
808
+ * respectively. If the name extraction fails, it will return NVRTC_INTERNAL_ERROR,
809
+ * otherwise *result is initialized with the extracted name.
810
+ *
811
+ * Windows-specific notes:
812
+ * - nvrtcGetTypeName() is not multi-thread safe because it calls UnDecorateSymbolName(),
813
+ * which is not multi-thread safe.
814
+ * - The returned string may contain Microsoft-specific keywords such as __ptr64 and __cdecl.
815
+ *
816
+ * \param [in] result: pointer to std::string in which to store the type name.
817
+ * \return
818
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
819
+ * - \link #nvrtcResult NVRTC_ERROR_INTERNAL_ERROR \endlink
820
+ *
821
+ */
822
+
823
+ template <typename T>
824
+ nvrtcResult nvrtcGetTypeName(std::string *result)
825
+ {
826
+ nvrtcResult res = nvrtcGetTypeName(typeid(__nvrtcGetTypeName_helper_t<T>),
827
+ result);
828
+ if (res != NVRTC_SUCCESS)
829
+ return res;
830
+
831
+ std::string repr = *result;
832
+ std::size_t idx = repr.find("__nvrtcGetTypeName_helper_t");
833
+ idx = (idx != std::string::npos) ? repr.find("<", idx) : idx;
834
+ std::size_t last_idx = repr.find_last_of('>');
835
+ if (idx == std::string::npos || last_idx == std::string::npos) {
836
+ return NVRTC_ERROR_INTERNAL_ERROR;
837
+ }
838
+ ++idx;
839
+ *result = repr.substr(idx, last_idx - idx);
840
+ return NVRTC_SUCCESS;
841
+ }
842
+
843
+ #endif /* NVRTC_GET_TYPE_NAME */
844
+
845
+ #endif /* __NVRTC_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/curand/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_globals.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+ #ifndef CURAND_GLOBALS_H
49
+ #define CURAND_GLOBALS_H
50
+
51
+ #define MAX_XOR_N (5)
52
+ #define SKIPAHEAD_BLOCKSIZE (4)
53
+ #define SKIPAHEAD_MASK ((1<<SKIPAHEAD_BLOCKSIZE)-1)
54
+ #define CURAND_2POW32 (4294967296.f)
55
+ #define CURAND_2POW32_DOUBLE (4294967296.)
56
+ #define CURAND_2POW32_INV (2.3283064e-10f)
57
+ #define CURAND_2POW32_INV_DOUBLE (2.3283064365386963e-10)
58
+ #define CURAND_2POW53_INV_DOUBLE (1.1102230246251565e-16)
59
+ #define CURAND_2POW32_INV_2PI (2.3283064e-10f * 6.2831855f)
60
+ #define CURAND_2PI (6.2831855f)
61
+ #define CURAND_2POW53_INV_2PI_DOUBLE (1.1102230246251565e-16 * 6.2831853071795860)
62
+ #define CURAND_PI_DOUBLE (3.1415926535897932)
63
+ #define CURAND_2PI_DOUBLE (6.2831853071795860)
64
+ #define CURAND_SQRT2 (-1.4142135f)
65
+ #define CURAND_SQRT2_DOUBLE (-1.4142135623730951)
66
+
67
+ #define SOBOL64_ITR_BINARY_DIVIDE 2
68
+ #define SOBOL_M2_BINARY_DIVIDE 10
69
+ #define MTGP32_M2_BINARY_DIVIDE 32
70
+ #define MAX_LAMBDA 400000
71
+ #define MIN_GAUSS_LAMBDA 2000
72
+
73
+ struct normal_args_st {
74
+ float mean;
75
+ float stddev;
76
+ };
77
+
78
+ typedef struct normal_args_st normal_args_t;
79
+
80
+ struct normal_args_double_st {
81
+ double mean;
82
+ double stddev;
83
+ };
84
+
85
+ typedef struct normal_args_double_st normal_args_double_t;
86
+
87
+
88
+
89
+
90
+
91
+
92
+
93
+ #endif
llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h ADDED
@@ -0,0 +1,697 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_LOGNORMAL_H_)
52
+ #define CURAND_LOGNORMAL_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include "curand_mrg32k3a.h"
65
+ #include "curand_mtgp32_kernel.h"
66
+ #include "curand_philox4x32_x.h"
67
+
68
+ /**
69
+ * \brief Return a log-normally distributed float from an XORWOW generator.
70
+ *
71
+ * Return a single log-normally distributed float derived from a normal
72
+ * distribution with mean \p mean and standard deviation \p stddev
73
+ * from the XORWOW generator in \p state,
74
+ * increment position of generator by one.
75
+ *
76
+ * The implementation uses a Box-Muller transform to generate two
77
+ * normally distributed results, transforms them to log-normal distribution,
78
+ * then returns them one at a time.
79
+ * See ::curand_log_normal2() for a more efficient version that returns
80
+ * both results at once.
81
+ *
82
+ * \param state - Pointer to state to update
83
+ * \param mean - Mean of the related normal distribution
84
+ * \param stddev - Standard deviation of the related normal distribution
85
+ *
86
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
87
+ */
88
+ QUALIFIERS float curand_log_normal(curandStateXORWOW_t *state, float mean, float stddev)
89
+ {
90
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
91
+ unsigned int x, y;
92
+ x = curand(state);
93
+ y = curand(state);
94
+ float2 v = _curand_box_muller(x, y);
95
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
96
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
97
+ return expf(mean + (stddev * v.x));
98
+ }
99
+ state->boxmuller_flag = 0;
100
+ return state->boxmuller_extra;
101
+ }
102
+
103
+ /**
104
+ * \brief Return a log-normally distributed float from an Philox4_32_10 generator.
105
+ *
106
+ * Return a single log-normally distributed float derived from a normal
107
+ * distribution with mean \p mean and standard deviation \p stddev
108
+ * from the Philox4_32_10 generator in \p state,
109
+ * increment position of generator by one.
110
+ *
111
+ * The implementation uses a Box-Muller transform to generate two
112
+ * normally distributed results, transforms them to log-normal distribution,
113
+ * then returns them one at a time.
114
+ * See ::curand_log_normal2() for a more efficient version that returns
115
+ * both results at once.
116
+ *
117
+ * \param state - Pointer to state to update
118
+ * \param mean - Mean of the related normal distribution
119
+ * \param stddev - Standard deviation of the related normal distribution
120
+ *
121
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
122
+ */
123
+
124
+ QUALIFIERS float curand_log_normal(curandStatePhilox4_32_10_t *state, float mean, float stddev)
125
+ {
126
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
127
+ unsigned int x, y;
128
+ x = curand(state);
129
+ y = curand(state);
130
+ float2 v = _curand_box_muller(x, y);
131
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
132
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
133
+ return expf(mean + (stddev * v.x));
134
+ }
135
+ state->boxmuller_flag = 0;
136
+ return state->boxmuller_extra;
137
+ }
138
+
139
/**
 * \brief Return two log-normally distributed floats from an XORWOW generator.
 *
 * Return two log-normally distributed floats derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the XORWOW generator in \p state,
 * increment position of generator by two.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then transforms them to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float2 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float2 curand_log_normal2(curandStateXORWOW_t *state, float mean, float stddev)
{
    /* Map both normal deviates through exp() to obtain log-normal deviates. */
    float2 result = curand_box_muller(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    return result;
}
164
+
165
/**
 * \brief Return two log-normally distributed floats from a Philox4_32_10 generator.
 *
 * Return two log-normally distributed floats derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Philox4_32_10 generator in \p state,
 * increment position of generator by two.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then transforms them to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float2 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float2 curand_log_normal2(curandStatePhilox4_32_10_t *state, float mean, float stddev)
{
    /* Map both normal deviates through exp() to obtain log-normal deviates. */
    float2 result = curand_box_muller(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    return result;
}
190
/**
 * \brief Return four log-normally distributed floats from a Philox4_32_10 generator.
 *
 * Return four log-normally distributed floats derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Philox4_32_10 generator in \p state,
 * increment position of generator by four.
 *
 * The implementation uses a Box-Muller transform to generate four
 * normally distributed results, then transforms them to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float4 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float4 curand_log_normal4(curandStatePhilox4_32_10_t *state, float mean, float stddev)
{
    /* Map all four normal deviates through exp() to obtain log-normal deviates. */
    float4 result = curand_box_muller4(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    result.z = expf(mean + stddev * result.z);
    result.w = expf(mean + stddev * result.w);
    return result;
}
217
+
218
/**
 * \brief Return a log-normally distributed float from an MRG32k3a generator.
 *
 * Return a single log-normally distributed float derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the MRG32k3a generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, transforms them to log-normal distribution,
 * then returns them one at a time.
 * See ::curand_log_normal2() for a more efficient version that returns
 * both results at once.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float curand_log_normal(curandStateMRG32k3a_t *state, float mean, float stddev)
{
    /* Hand back the cached second Box-Muller result, if one is pending. */
    if (state->boxmuller_flag == EXTRA_FLAG_LOG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    float2 pair = curand_box_muller_mrg(state);
    /* Cache the second transformed value for the next call; return the first now. */
    state->boxmuller_extra = expf(mean + stddev * pair.y);
    state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
    return expf(mean + stddev * pair.x);
}
249
+
250
/**
 * \brief Return two log-normally distributed floats from an MRG32k3a generator.
 *
 * Return two log-normally distributed floats derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the MRG32k3a generator in \p state,
 * increment position of generator by two.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then transforms them to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float2 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float2 curand_log_normal2(curandStateMRG32k3a_t *state, float mean, float stddev)
{
    /* Map both normal deviates through exp() to obtain log-normal deviates. */
    float2 result = curand_box_muller_mrg(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    return result;
}
275
+
276
/**
 * \brief Return a log-normally distributed float from an MTGP32 generator.
 *
 * Return a single log-normally distributed float derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the MTGP32 generator in \p state,
 * increment position of generator.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then transforms the result
 * to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float curand_log_normal(curandStateMtgp32_t *state, float mean, float stddev)
{
    float z = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * z);
}
298
+
299
/**
 * \brief Return a log-normally distributed float from a Sobol32 generator.
 *
 * Return a single log-normally distributed float derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Sobol32 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then transforms the result
 * to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float curand_log_normal(curandStateSobol32_t *state, float mean, float stddev)
{
    float z = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * z);
}
321
/**
 * \brief Return a log-normally distributed float from a scrambled Sobol32 generator.
 *
 * Return a single log-normally distributed float derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the scrambled Sobol32 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then transforms the result
 * to log-normal.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float curand_log_normal(curandStateScrambledSobol32_t *state, float mean, float stddev)
{
    float z = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * z);
}
343
+
344
/**
 * \brief Return a log-normally distributed float from a Sobol64 generator.
 *
 * Return a single log-normally distributed float derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then converts it to the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float curand_log_normal(curandStateSobol64_t *state, float mean, float stddev)
{
    float z = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * z);
}
366
+
367
/**
 * \brief Return a log-normally distributed float from a scrambled Sobol64 generator.
 *
 * Return a single log-normally distributed float derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the scrambled Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then converts it to the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS float curand_log_normal(curandStateScrambledSobol64_t *state, float mean, float stddev)
{
    float z = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * z);
}
389
+
390
/**
 * \brief Return a log-normally distributed double from an XORWOW generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the XORWOW generator in \p state,
 * increment position of generator.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, transforms them to log-normal distribution,
 * then returns them one at a time.
 * See ::curand_log_normal2_double() for a more efficient version that returns
 * both results at once.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateXORWOW_t *state, double mean, double stddev)
{
    /* Hand back the cached second Box-Muller result, if one is pending. */
    if (state->boxmuller_flag_double == EXTRA_FLAG_LOG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    /* A double-precision Box-Muller pair consumes four 32-bit draws. */
    unsigned int u0 = curand(state);
    unsigned int u1 = curand(state);
    unsigned int u2 = curand(state);
    unsigned int u3 = curand(state);
    double2 pair = _curand_box_muller_double(u0, u1, u2, u3);
    /* Cache the second transformed value for the next call; return the first now. */
    state->boxmuller_extra_double = exp(mean + stddev * pair.y);
    state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
    return exp(mean + stddev * pair.x);
}
427
+
428
/**
 * \brief Return a log-normally distributed double from a Philox4_32_10 generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Philox4_32_10 generator in \p state,
 * increment position of generator.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, transforms them to log-normal distribution,
 * then returns them one at a time.
 * See ::curand_log_normal2_double() for a more efficient version that returns
 * both results at once.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
{
    /* Hand back the cached second Box-Muller result, if one is pending. */
    if (state->boxmuller_flag_double == EXTRA_FLAG_LOG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    /* One curand4() call supplies the four 32-bit draws needed for a double pair. */
    uint4 draws = curand4(state);
    double2 pair = _curand_box_muller_double(draws.x, draws.y, draws.z, draws.w);
    /* Cache the second transformed value for the next call; return the first now. */
    state->boxmuller_extra_double = exp(mean + stddev * pair.y);
    state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
    return exp(mean + stddev * pair.x);
}
462
+
463
+
464
/**
 * \brief Return two log-normally distributed doubles from an XORWOW generator.
 *
 * Return two log-normally distributed doubles derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the XORWOW generator in \p state,
 * increment position of generator by two.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then transforms them to log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double2 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double2 curand_log_normal2_double(curandStateXORWOW_t *state, double mean, double stddev)
{
    /* Map both normal deviates through exp() to obtain log-normal deviates. */
    double2 result = curand_box_muller_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    return result;
}
489
+
490
/**
 * \brief Return two log-normally distributed doubles from a Philox4_32_10 generator.
 *
 * Return two log-normally distributed doubles derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Philox4_32_10 generator in \p state,
 * increment position of generator by four.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then transforms them to log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double2 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double2 curand_log_normal2_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
{
    /* Map both normal deviates through exp() to obtain log-normal deviates. */
    double2 result = curand_box_muller2_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    return result;
}
515
// Not part of the public API.
QUALIFIERS double4 curand_log_normal4_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
{
    /* Map all four normal deviates through exp() to obtain log-normal deviates. */
    double4 result = curand_box_muller4_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    result.z = exp(mean + stddev * result.z);
    result.w = exp(mean + stddev * result.w);
    return result;
}
525
+
526
/**
 * \brief Return a log-normally distributed double from an MRG32k3a generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the MRG32k3a generator in \p state,
 * increment position of generator.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, transforms them to log-normal distribution,
 * then returns them one at a time.
 * See ::curand_log_normal2_double() for a more efficient version that returns
 * both results at once.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateMRG32k3a_t *state, double mean, double stddev)
{
    /* Hand back the cached second Box-Muller result, if one is pending. */
    if (state->boxmuller_flag_double == EXTRA_FLAG_LOG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    double2 pair = curand_box_muller_mrg_double(state);
    /* Cache the second transformed value for the next call; return the first now. */
    state->boxmuller_extra_double = exp(mean + stddev * pair.y);
    state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
    return exp(mean + stddev * pair.x);
}
557
+
558
/**
 * \brief Return two log-normally distributed doubles from an MRG32k3a generator.
 *
 * Return two log-normally distributed doubles derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the MRG32k3a generator in \p state,
 * increment position of generator by two.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then transforms them to log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double2 where each element is from a
 * distribution with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double2 curand_log_normal2_double(curandStateMRG32k3a_t *state, double mean, double stddev)
{
    /* Map both normal deviates through exp() to obtain log-normal deviates. */
    double2 result = curand_box_muller_mrg_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    return result;
}
583
+
584
/**
 * \brief Return a log-normally distributed double from an MTGP32 generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the MTGP32 generator in \p state,
 * increment position of generator.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then transforms it into the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateMtgp32_t *state, double mean, double stddev)
{
    double z = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * z);
}
606
+
607
/**
 * \brief Return a log-normally distributed double from a Sobol32 generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Sobol32 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then transforms it into the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateSobol32_t *state, double mean, double stddev)
{
    double z = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * z);
}
629
+
630
/**
 * \brief Return a log-normally distributed double from a scrambled Sobol32 generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the scrambled Sobol32 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then transforms it into the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol32_t *state, double mean, double stddev)
{
    double z = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * z);
}
652
+
653
/**
 * \brief Return a log-normally distributed double from a Sobol64 generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then converts it to the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateSobol64_t *state, double mean, double stddev)
{
    double z = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * z);
}
674
+
675
/**
 * \brief Return a log-normally distributed double from a scrambled Sobol64 generator.
 *
 * Return a single log-normally distributed double derived from a normal
 * distribution with mean \p mean and standard deviation \p stddev
 * from the scrambled Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses the inverse cumulative distribution function
 * to generate a normally distributed result, then converts it to the
 * log-normal distribution.
 *
 * \param state - Pointer to state to update
 * \param mean - Mean of the related normal distribution
 * \param stddev - Standard deviation of the related normal distribution
 *
 * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
 */
QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol64_t *state, double mean, double stddev)
{
    double z = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * z);
}
696
+
697
+ #endif // !defined(CURAND_LOGNORMAL_H_)
llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h ADDED
@@ -0,0 +1,516 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * curand_mtgp32_host.h
52
+ *
53
+ *
54
+ * MTGP32-11213
55
+ *
56
+ * Mersenne Twister RNG for the GPU
57
+ *
58
+ * The period of generated integers is 2<sup>11213</sup>-1.
59
+ *
60
+ * This code generates 32-bit unsigned integers, and
61
+ * single precision floating point numbers uniformly distributed
62
+ * in the range [1, 2). (float r; 1.0 <= r < 2.0)
63
+ */
64
+
65
+ /*
66
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
67
+ * University. All rights reserved.
68
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
69
+ * University and University of Tokyo. All rights reserved.
70
+ *
71
+ * Redistribution and use in source and binary forms, with or without
72
+ * modification, are permitted provided that the following conditions are
73
+ * met:
74
+ *
75
+ * * Redistributions of source code must retain the above copyright
76
+ * notice, this list of conditions and the following disclaimer.
77
+ * * Redistributions in binary form must reproduce the above
78
+ * copyright notice, this list of conditions and the following
79
+ * disclaimer in the documentation and/or other materials provided
80
+ * with the distribution.
81
+ * * Neither the name of the Hiroshima University nor the names of
82
+ * its contributors may be used to endorse or promote products
83
+ * derived from this software without specific prior written
84
+ * permission.
85
+ *
86
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
87
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
88
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
89
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
90
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
91
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
92
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
93
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
94
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97
+ */
98
+ #if !defined CURAND_MTGP32_HOST_H
99
+ #define CURAND_MTGP32_HOST_H
100
+
101
+ #if !defined(QUALIFIERS)
102
+ #define QUALIFIERS static inline __device__
103
+ #endif
104
+
105
+ #include <cuda_runtime.h>
106
+ #include <stdlib.h>
107
+ #include <memory.h>
108
+ #include <string.h>
109
+ #include "curand.h"
110
+ #include "curand_mtgp32.h"
111
+ #include "curand_mtgp32dc_p_11213.h"
112
+
113
+
114
+ /**
115
+ * \addtogroup DEVICE Device API
116
+ *
117
+ * @{
118
+ */
119
+
120
+ static const unsigned int non_zero = 0x4d544750;
121
+
122
/*
 * Mixing helper used during state initialization by
 * mtgp32_init_by_array() and mtgp32_init_by_str().
 * @param[in] x 32-bit integer
 * @return 32-bit integer
 */
static __forceinline__ unsigned int ini_func1(unsigned int x) {
    unsigned int mixed = x ^ (x >> 27);
    return mixed * 1664525u;
}
131
+
132
/*
 * Second mixing helper used during state initialization by
 * mtgp32_init_by_array() and mtgp32_init_by_str().
 * @param[in] x 32-bit integer
 * @return 32-bit integer
 */
static __forceinline__ unsigned int ini_func2(unsigned int x) {
    unsigned int mixed = x ^ (x >> 27);
    return mixed * 1566083941u;
}
141
+
142
/*
 * Initialize the MTGP32 internal state array with a 32-bit integer seed.
 * \b para should be one of the elements in the parameter table
 * (mtgp32-param-ref.c).
 *
 * This variant is called from CUDA host code, which uses its own
 * structure and allocation method.
 *
 * @param[out] state MTGP internal state vector.
 * @param[in] para parameter structure
 * @param[in] seed a 32-bit integer used as the seed.
 */
static __forceinline__ __host__
void mtgp32_init_state(unsigned int state[],
                       const mtgp32_params_fast_t *para, unsigned int seed) {
    int size = para->mexp / 32 + 1;
    /* Parameter-set-specific value folded into the initial fill pattern. */
    unsigned int hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    unsigned int fill = hidden_seed;
    fill += fill >> 16;
    fill += fill >> 8;
    memset(state, fill & 0xff, sizeof(unsigned int) * size);
    state[0] = seed;
    state[1] = hidden_seed;
    /* MT19937-style recurrence, XOR-folded into the memset fill pattern. */
    for (int i = 1; i < size; i++) {
        state[i] ^= 1812433253u * (state[i - 1] ^ (state[i - 1] >> 30)) + i;
    }
}
173
+
174
/*
 * Initialize an MTGP32 internal state vector from a 32-bit integer array.
 * \b para must be one of the entries of the pre-computed parameter table
 * (mtgp32-param-ref.c).
 *
 * @param[out] state  MTGP internal state vector
 * @param[in]  para   parameter structure
 * @param[in]  array  32-bit integer array used as the seed
 * @param[in]  length number of elements in \b array
 * @return 0 (success; this routine cannot fail)
 */
static __forceinline__ __host__
int mtgp32_init_by_array(unsigned int state[],
                         const mtgp32_params_fast_t *para,
                         unsigned int *array, int length) {
    const int size = para->mexp / 32 + 1;
    int pos, step, count;
    unsigned int r;
    int lag;
    int mid;
    unsigned int hidden_seed;
    unsigned int fill;

    /* Lag grows with the state size (thresholds taken from the reference
       MTGP implementation). */
    if (size >= 623) {
        lag = 11;
    } else if (size >= 68) {
        lag = 7;
    } else if (size >= 39) {
        lag = 5;
    } else {
        lag = 3;
    }
    mid = (size - lag) / 2;

    /* Pre-fill the state with one byte derived from the parameter table. */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    fill = hidden_seed;
    fill += fill >> 16;
    fill += fill >> 8;
    memset(state, fill & 0xff, sizeof(unsigned int) * size);
    state[0] = hidden_seed;

    /* Enough mixing steps to absorb the whole seed array at least once. */
    if (length + 1 > size) {
        count = length + 1;
    } else {
        count = size;
    }

    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
    state[mid] += r;
    r += length;
    state[(mid + lag) % size] += r;
    state[0] = r;
    count--;

    /* Phase 1: absorb the caller-provided seed words. */
    for (pos = 1, step = 0; (step < count) && (step < length); step++) {
        r = ini_func1(state[pos] ^ state[(pos + mid) % size]
                      ^ state[(pos + size - 1) % size]);
        state[(pos + mid) % size] += r;
        r += array[step] + pos;
        state[(pos + mid + lag) % size] += r;
        state[pos] = r;
        pos = (pos + 1) % size;
    }
    /* Phase 2: continue mixing until count steps have been performed. */
    for (; step < count; step++) {
        r = ini_func1(state[pos] ^ state[(pos + mid) % size]
                      ^ state[(pos + size - 1) % size]);
        state[(pos + mid) % size] += r;
        r += pos;
        state[(pos + mid + lag) % size] += r;
        state[pos] = r;
        pos = (pos + 1) % size;
    }
    /* Phase 3: one full pass with the second scrambling function. */
    for (step = 0; step < size; step++) {
        r = ini_func2(state[pos] + state[(pos + mid) % size]
                      + state[(pos + size - 1) % size]);
        state[(pos + mid) % size] ^= r;
        r -= pos;
        state[(pos + mid + lag) % size] ^= r;
        state[pos] = r;
        pos = (pos + 1) % size;
    }
    /* The all-zero state is invalid; force a non-zero marker word. */
    if (state[size - 1] == 0) {
        state[size - 1] = non_zero;
    }
    return 0;
}
259
+
260
/*
 * Initialize an MTGP32 internal state vector from a NUL-terminated
 * character string.  Same algorithm as mtgp32_init_by_array(), with the
 * string bytes serving as the seed words.  \b para must be one of the
 * entries of the pre-computed parameter table (mtgp32-param-ref.c).
 *
 * @param[out] state MTGP internal state vector
 * @param[in]  para  parameter structure
 * @param[in]  array character array used as the seed (NUL-terminated)
 * @return 0 (success; this routine cannot fail)
 */
static __forceinline__ __host__
int mtgp32_init_by_str(unsigned int state[],
                       const mtgp32_params_fast_t *para, unsigned char *array) {
    const int size = para->mexp / 32 + 1;
    const int length = (unsigned int)strlen((char *)array);
    int pos, step, count;
    unsigned int r;
    int lag;
    int mid;
    unsigned int hidden_seed;
    unsigned int fill;

    /* Lag grows with the state size (thresholds taken from the reference
       MTGP implementation). */
    if (size >= 623) {
        lag = 11;
    } else if (size >= 68) {
        lag = 7;
    } else if (size >= 39) {
        lag = 5;
    } else {
        lag = 3;
    }
    mid = (size - lag) / 2;

    /* Pre-fill the state with one byte derived from the parameter table. */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    fill = hidden_seed;
    fill += fill >> 16;
    fill += fill >> 8;
    memset(state, fill & 0xff, sizeof(unsigned int) * size);
    state[0] = hidden_seed;

    /* Enough mixing steps to absorb the whole string at least once. */
    if (length + 1 > size) {
        count = length + 1;
    } else {
        count = size;
    }

    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
    state[mid] += r;
    r += length;
    state[(mid + lag) % size] += r;
    state[0] = r;
    count--;

    /* Phase 1: absorb the seed string, one byte per step. */
    for (pos = 1, step = 0; (step < count) && (step < length); step++) {
        r = ini_func1(state[pos] ^ state[(pos + mid) % size]
                      ^ state[(pos + size - 1) % size]);
        state[(pos + mid) % size] += r;
        r += array[step] + pos;
        state[(pos + mid + lag) % size] += r;
        state[pos] = r;
        pos = (pos + 1) % size;
    }
    /* Phase 2: continue mixing until count steps have been performed. */
    for (; step < count; step++) {
        r = ini_func1(state[pos] ^ state[(pos + mid) % size]
                      ^ state[(pos + size - 1) % size]);
        state[(pos + mid) % size] += r;
        r += pos;
        state[(pos + mid + lag) % size] += r;
        state[pos] = r;
        pos = (pos + 1) % size;
    }
    /* Phase 3: one full pass with the second scrambling function. */
    for (step = 0; step < size; step++) {
        r = ini_func2(state[pos] + state[(pos + mid) % size]
                      + state[(pos + size - 1) % size]);
        state[(pos + mid) % size] ^= r;
        r -= pos;
        state[(pos + mid + lag) % size] ^= r;
        state[pos] = r;
        pos = (pos + 1) % size;
    }
    /* The all-zero state is invalid; force a non-zero marker word. */
    if (state[size - 1] == 0) {
        state[size - 1] = non_zero;
    }
    return 0;
}
346
+
347
/*
 * Re-organize \b block_num MTGP32 parameter sets into the flat layout the
 * device kernels expect, and copy the resulting tables to device memory
 * through the pointers in \b p.
 *
 * Fix: the previous revision freed the temporary host buffers twice on the
 * allocation-failure path (once inside the failure branch and again in the
 * unconditional cleanup at the end, with the pointers left dangling) — a
 * double free.  Cleanup now happens exactly once, at a single exit point;
 * free(NULL) is well-defined, so partially successful allocation is safe.
 *
 * @param[in]  params    host array of block_num parameter sets
 * @param[out] p         structure (in device memory) receiving the tables
 * @param[in]  block_num number of parameter sets
 * @return
 * - CURAND_STATUS_ALLOCATION_FAILED if a host buffer could not be allocated
 * - CURAND_STATUS_INITIALIZATION_FAILED if a copy to device memory failed
 * - CURAND_STATUS_SUCCESS otherwise
 */
template<typename ParamsType>
static __forceinline__ __host__
curandStatus_t curandMakeMTGP32ConstantsImpl(const mtgp32_params_fast_t params[], ParamsType * p, const int block_num)
{
    const int size1 = sizeof(unsigned int) * block_num;
    const int size2 = sizeof(unsigned int) * block_num * TBL_SIZE;
    curandStatus_t status = CURAND_STATUS_SUCCESS;

    unsigned int *h_pos_tbl           = (unsigned int *)malloc(size1);
    unsigned int *h_sh1_tbl           = (unsigned int *)malloc(size1);
    unsigned int *h_sh2_tbl           = (unsigned int *)malloc(size1);
    unsigned int *h_param_tbl         = (unsigned int *)malloc(size2);
    unsigned int *h_temper_tbl        = (unsigned int *)malloc(size2);
    unsigned int *h_single_temper_tbl = (unsigned int *)malloc(size2);
    unsigned int *h_mask              = (unsigned int *)malloc(sizeof(unsigned int));

    if (h_pos_tbl == NULL
        || h_sh1_tbl == NULL
        || h_sh2_tbl == NULL
        || h_param_tbl == NULL
        || h_temper_tbl == NULL
        || h_single_temper_tbl == NULL
        || h_mask == NULL) {
        status = CURAND_STATUS_ALLOCATION_FAILED;
    } else {
        /* Flatten the per-block parameters into contiguous host tables. */
        h_mask[0] = params[0].mask;
        for (int i = 0; i < block_num; i++) {
            h_pos_tbl[i] = params[i].pos;
            h_sh1_tbl[i] = params[i].sh1;
            h_sh2_tbl[i] = params[i].sh2;
            for (int j = 0; j < TBL_SIZE; j++) {
                h_param_tbl[i * TBL_SIZE + j]         = params[i].tbl[j];
                h_temper_tbl[i * TBL_SIZE + j]        = params[i].tmp_tbl[j];
                h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
            }
        }
        /* Push each table to the device; short-circuiting stops at the
           first failed copy, matching the original else-if cascade. */
        if (cudaMemcpy(p->pos_tbl, h_pos_tbl, size1,
                       cudaMemcpyHostToDevice) != cudaSuccess
            || cudaMemcpy(p->sh1_tbl, h_sh1_tbl, size1,
                          cudaMemcpyHostToDevice) != cudaSuccess
            || cudaMemcpy(p->sh2_tbl, h_sh2_tbl, size1,
                          cudaMemcpyHostToDevice) != cudaSuccess
            || cudaMemcpy(p->param_tbl, h_param_tbl, size2,
                          cudaMemcpyHostToDevice) != cudaSuccess
            || cudaMemcpy(p->temper_tbl, h_temper_tbl, size2,
                          cudaMemcpyHostToDevice) != cudaSuccess
            || cudaMemcpy(p->single_temper_tbl, h_single_temper_tbl, size2,
                          cudaMemcpyHostToDevice) != cudaSuccess
            || cudaMemcpy(p->mask, h_mask, sizeof(unsigned int),
                          cudaMemcpyHostToDevice) != cudaSuccess) {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        }
    }

    /* Single cleanup point: each buffer is freed exactly once. */
    free(h_pos_tbl);
    free(h_sh1_tbl);
    free(h_sh2_tbl);
    free(h_param_tbl);
    free(h_temper_tbl);
    free(h_single_temper_tbl);
    free(h_mask);
    return status;
}
442
+
443
/**
 * \brief Set up constant parameters for the mtgp32 generator
 *
 * This host-side helper function re-organizes CURAND_NUM_MTGP32_PARAMS sets of
 * generator parameters for use by kernel functions and copies the
 * result to the specified location in device memory.
 *
 * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
 * \param p - pointer to a structure of type mtgp32_kernel_params_t in device memory.
 *
 * \return
 * - CURAND_STATUS_ALLOCATION_FAILED if host memory could not be allocated
 * - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
 * - CURAND_STATUS_SUCCESS otherwise
 */
static __forceinline__ __host__
curandStatus_t curandMakeMTGP32Constants(const mtgp32_params_fast_t params[], mtgp32_kernel_params_t * p)
{
    /* Thin wrapper: delegate with the fixed parameter-set count. */
    return curandMakeMTGP32ConstantsImpl(params, p, CURAND_NUM_MTGP32_PARAMS);
}
463
+
464
/**
 * \brief Set up initial states for the mtgp32 generator
 *
 * This host-side helper function initializes \p n states (one parameter set
 * per state) for an mtgp32 generator.  It allocates a temporary state array
 * in host memory, seeds each entry, and copies the result to device memory.
 *
 * \param s - pointer to an array of states in device memory
 * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
 * \param k - pointer to a structure of type mtgp32_kernel_params_t in device memory
 * \param n - number of parameter sets/states to initialize
 * \param seed - seed value
 *
 * \return
 * - CURAND_STATUS_ALLOCATION_FAILED if host memory state could not be allocated
 * - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
 * - CURAND_STATUS_SUCCESS otherwise
 */
static __forceinline__ __host__
curandStatus_t CURANDAPI curandMakeMTGP32KernelState(curandStateMtgp32_t *s,
                                                     mtgp32_params_fast_t params[],
                                                     mtgp32_kernel_params_t *k,
                                                     int n,
                                                     unsigned long long seed)
{
    curandStatus_t status = CURAND_STATUS_SUCCESS;
    curandStateMtgp32_t *h_states =
        (curandStateMtgp32_t *)malloc(sizeof(curandStateMtgp32_t) * n);

    if (h_states == NULL) {
        status = CURAND_STATUS_ALLOCATION_FAILED;
    } else {
        /* Fold the top 32 seed bits into the bottom 32, then give every
           state a distinct 32-bit seed (seed + i + 1). */
        seed = seed ^ (seed >> 32);
        for (int i = 0; i < n; i++) {
            mtgp32_init_state(&(h_states[i].s[0]), &params[i],
                              (unsigned int)seed + i + 1);
            h_states[i].offset = 0;
            h_states[i].pIdx = i;
            h_states[i].k = k;
        }
        if (cudaMemcpy(s, h_states,
                       sizeof(curandStateMtgp32_t) * n,
                       cudaMemcpyHostToDevice) != cudaSuccess) {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        }
    }
    free(h_states); /* free(NULL) is a no-op on the allocation-failure path */
    return status;
}
511
+
512
+ /** @} */
513
+
514
+ #endif
515
+
516
+
llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * curand_mtgp32_kernel.h
52
+ *
53
+ *
54
+ * MTGP32-11213
55
+ *
56
+ * Mersenne Twister RNG for the GPU
57
+ *
58
+ * The period of generated integers is 2<sup>11213</sup>-1.
59
+ *
60
+ * This code generates 32-bit unsigned integers, and
61
+ * single precision floating point numbers uniformly distributed
62
+ * in the range [1, 2). (float r; 1.0 <= r < 2.0)
63
+ */
64
+
65
+ /*
66
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
67
+ * University. All rights reserved.
68
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
69
+ * University and University of Tokyo. All rights reserved.
70
+ *
71
+ * Redistribution and use in source and binary forms, with or without
72
+ * modification, are permitted provided that the following conditions are
73
+ * met:
74
+ *
75
+ * * Redistributions of source code must retain the above copyright
76
+ * notice, this list of conditions and the following disclaimer.
77
+ * * Redistributions in binary form must reproduce the above
78
+ * copyright notice, this list of conditions and the following
79
+ * disclaimer in the documentation and/or other materials provided
80
+ * with the distribution.
81
+ * * Neither the name of the Hiroshima University nor the names of
82
+ * its contributors may be used to endorse or promote products
83
+ * derived from this software without specific prior written
84
+ * permission.
85
+ *
86
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
87
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
88
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
89
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
90
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
91
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
92
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
93
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
94
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97
+ */
98
+ #if !defined CURAND_MTGP32_KERNEL_H
99
+ #define CURAND_MTGP32_KERNEL_H
100
+
101
+ #if !defined(QUALIFIERS)
102
+ #define QUALIFIERS static __forceinline__ __device__
103
+ #endif
104
+
105
+ #ifndef __CUDACC_RTC__
106
+ #include <cuda_runtime.h>
107
+ #include <stdlib.h>
108
+ #include <memory.h>
109
+ #include <string.h>
110
+ #endif // ifndef __CUDACC_RTC__
111
+ #include <nv/target>
112
+ #include "curand.h"
113
+ #include "curand_mtgp32.h"
114
+
115
+ /**
116
+ * \addtogroup DEVICE Device API
117
+ *
118
+ * @{
119
+ */
120
+
121
+ #ifndef __CUDA_ARCH__
122
+ // define blockDim and threadIdx for host compatibility call
123
+ extern const dim3 blockDim;
124
+ extern const uint3 threadIdx;
125
+ #endif
126
+
127
+
128
/*
 * The MTGP32 recursion step.
 *
 * @param[in] k   kernel parameter tables
 * @param[in] X1  the farthest part of the state array.
 * @param[in] X2  the second farthest part of the state array.
 * @param[in] Y   a part of the state array.
 * @param[in] bid block (parameter-set) id.
 * @return next state word
 */
QUALIFIERS unsigned int para_rec(mtgp32_kernel_params_t * k,unsigned int X1, unsigned int X2, unsigned int Y, int bid) {
    unsigned int x = (X1 & k->mask[0]) ^ X2;

    x ^= x << k->sh1_tbl[bid];
    unsigned int y = x ^ (Y >> k->sh2_tbl[bid]);
    /* Low 4 bits of y select the sparse-matrix constant to fold in. */
    return y ^ k->param_tbl[bid][y & 0x0f];
}
146
+
147
/*
 * The MTGP32 tempering function.
 *
 * @param[in] k   kernel parameter tables
 * @param[in] V   the output value to be tempered.
 * @param[in] T   the tempering helper value.
 * @param[in] bid block (parameter-set) id.
 * @return the tempered value.
 */
QUALIFIERS unsigned int temper(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
    unsigned int t = T;

    /* Fold T down so its low 4 bits index the tempering table. */
    t ^= t >> 16;
    t ^= t >> 8;
    return V ^ k->temper_tbl[bid][t & 0x0f];
}
163
+
164
/*
 * The combined tempering-and-conversion function.  Using the preset
 * single_temper_tbl, tempering and conversion to IEEE single-precision
 * format are done in one step.
 *
 * @param[in] k   kernel parameter tables
 * @param[in] V   the output value to be tempered.
 * @param[in] T   the tempering helper value.
 * @param[in] bid block (parameter-set) id.
 * @return the tempered and converted value (bit pattern of a float).
 */
QUALIFIERS unsigned int temper_single(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
    unsigned int t = T;

    /* Fold T down so its low 4 bits index the tempering table. */
    t ^= t >> 16;
    t ^= t >> 8;
    /* V >> 9 supplies the mantissa bits; the table XOR applies tempering
       together with the fixed exponent bits. */
    return (V >> 9) ^ k->single_temper_tbl[bid][t & 0x0f];
}
184
+
185
/**
 * \brief Return 32-bits of pseudorandomness from a mtgp32 generator.
 *
 * Return 32-bits of pseudorandomness from the mtgp32 generator in \p state,
 * increment position of generator by the number of threads in the block.
 * Note the number of threads in the block can not exceed 256.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand(curandStateMtgp32_t *state)
{
    int pos = state->k->pos_tbl[state->pIdx];
    /* Total threads in the block (must be <= 256) and this thread's index
       within it (formula kept exactly as in the original source). */
    unsigned int nthreads = blockDim.z * blockDim.y * blockDim.x;
    unsigned int tid = (blockDim.z * blockDim.y * threadIdx.z)
                     + (blockDim.x * threadIdx.y) + threadIdx.x;

    unsigned int r = para_rec(state->k,
                              state->s[(tid + state->offset) & MTGP32_STATE_MASK],
                              state->s[(tid + state->offset + 1) & MTGP32_STATE_MASK],
                              state->s[(tid + state->offset + pos) & MTGP32_STATE_MASK],
                              state->pIdx);
    state->s[(tid + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;

    unsigned int out = temper(state->k, r,
                              state->s[(tid + state->offset + pos - 1) & MTGP32_STATE_MASK],
                              state->pIdx);
    /* Barrier: every thread must finish touching the state before the
       shared offset advances. */
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    if (tid == 0)
    {
        state->offset = (state->offset + nthreads) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    return out;
}
229
/**
 * \brief Return 32-bits of pseudorandomness from a specific position in a mtgp32 generator.
 *
 * Return 32-bits of pseudorandomness from position \p index of the mtgp32 generator
 * in \p state, increment position of generator by \p n positions, which must be the
 * total number of positions updated in the state by the thread block, for this
 * invocation.
 *
 * Note:
 * Thread indices must range from 0...\p n - 1.
 * The number of positions updated may not exceed 256.
 * A thread block may update more than one state, but a given state may not be
 * updated by more than one thread block.
 *
 * \param state - Pointer to state to update
 * \param index - Index (0..255) of the position within the state to draw from and update
 * \param n - The total number of positions in this state that are being updated by this invocation
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand_mtgp32_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
{
    int pos = state->k->pos_tbl[state->pIdx];
    unsigned int tid = index;

    unsigned int r = para_rec(state->k,
                              state->s[(tid + state->offset) & MTGP32_STATE_MASK],
                              state->s[(tid + state->offset + 1) & MTGP32_STATE_MASK],
                              state->s[(tid + state->offset + pos) & MTGP32_STATE_MASK],
                              state->pIdx);
    state->s[(tid + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;

    unsigned int out = temper(state->k, r,
                              state->s[(tid + state->offset + pos - 1) & MTGP32_STATE_MASK],
                              state->pIdx);
    /* Barrier: every participating thread must finish touching the state
       before the shared offset advances. */
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    if (index == 0)
    {
        state->offset = (state->offset + n) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    return out;
}
276
/**
 * \brief Return a uniformly distributed float from a mtgp32 generator.
 *
 * Return a uniformly distributed float between \p 0.0f and \p 1.0f
 * from the mtgp32 generator in \p state, increment position of generator.
 * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
 * point outputs are never returned.
 *
 * Note: This alternate derivation of a uniform float is provided for completeness
 * with the original source
 *
 * \param state - Pointer to state to update
 *
 * \return uniformly distributed float between \p 0.0f and \p 1.0f
 */
QUALIFIERS float curand_mtgp32_single(curandStateMtgp32_t *state)
{
    int pos = state->k->pos_tbl[state->pIdx];
    /* NOTE(review): the thread-index formula and the unmasked store to
       s[tid] below differ from curand(); both are preserved exactly as in
       the original source — verify against upstream before changing. */
    unsigned int tid = blockDim.z * blockDim.y;
    unsigned int delta = tid * blockDim.x; /* threads per block; must be <= 256 */
    unsigned int r;
    unsigned int bits;
    float result;

    tid += threadIdx.x;
    r = para_rec(state->k,
                 state->s[(tid + state->offset) & MTGP32_STATE_MASK],
                 state->s[(tid + state->offset + 1) & MTGP32_STATE_MASK],
                 state->s[(tid + state->offset + pos) & MTGP32_STATE_MASK],
                 state->pIdx);
    state->s[tid] = r;

    bits = temper_single(state->k, r,
                         state->s[(tid + state->offset + pos - 1) & MTGP32_STATE_MASK],
                         state->pIdx);
    /* Barrier: every thread must finish touching the state before the
       shared offset advances. */
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    if (threadIdx.x == 0)
    {
        state->offset = (state->offset + delta) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    /* Reinterpret the tempered bits as an IEEE float; memcpy avoids
       strict-aliasing issues. */
    memcpy(&result, &bits, sizeof(bits));
    return result;
}
327
+
328
/**
 * \brief Return a uniformly distributed float from a specific position in a mtgp32 generator.
 *
 * Return a uniformly distributed float between \p 0.0f and \p 1.0f
 * from position \p index of the mtgp32 generator in \p state, and
 * increment position of generator by \p n positions, which must be the total
 * number of positions updated in the state by the thread block, for this
 * invocation.
 * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
 * point outputs are never returned.
 *
 * Note 1:
 * Thread indices must range from 0...\p n - 1.
 * The number of positions updated may not exceed 256.
 * A thread block may update more than one state, but a given state may not be
 * updated by more than one thread block.
 *
 * Note 2: This alternate derivation of a uniform float is provided for completeness
 * with the original source
 *
 * \param state - Pointer to state to update
 * \param index - Index (0..255) of the position within the state to draw from and update
 * \param n - The total number of positions in this state that are being updated by this invocation
 *
 * \return uniformly distributed float between \p 0.0f and \p 1.0f
 */
QUALIFIERS float curand_mtgp32_single_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
{
    int pos = state->k->pos_tbl[state->pIdx];
    unsigned int tid = index;
    unsigned int r;
    unsigned int bits;
    float result;

    /* NOTE(review): the unmasked store to s[tid] below differs from
       curand_mtgp32_specific(); preserved exactly as in the original
       source — verify against upstream before changing. */
    r = para_rec(state->k,
                 state->s[(tid + state->offset) & MTGP32_STATE_MASK],
                 state->s[(tid + state->offset + 1) & MTGP32_STATE_MASK],
                 state->s[(tid + state->offset + pos) & MTGP32_STATE_MASK],
                 state->pIdx);
    state->s[tid] = r;

    bits = temper_single(state->k, r,
                         state->s[(tid + state->offset + pos - 1) & MTGP32_STATE_MASK],
                         state->pIdx);
    /* Barrier: every participating thread must finish touching the state
       before the shared offset advances. */
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    if (threadIdx.x == 0)
    {
        state->offset = (state->offset + n) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    /* Reinterpret the tempered bits as an IEEE float; memcpy avoids
       strict-aliasing issues. */
    memcpy(&result, &bits, sizeof(bits));
    return result;
}
383
+
384
+ /** @} */
385
+
386
+ #endif