diff --git a/ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1fa7d07e671e1a639f310dc0915a653e84e9ec8e --- /dev/null +++ b/ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61563ba8e0b5ecc0c60abf6de7fca13625ccdfb06c023bf6cc82be18813e84ea +size 33555627 diff --git a/ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..89a115168517800636e89fdb1d5863484f464d45 --- /dev/null +++ b/ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c01a8ce004c1f19494c18155c0095b6724158c388db1c1b0c9e603aef4cab84b +size 33555533 diff --git a/ckpts/universal/global_step80/zero/18.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/18.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..d269d6a1b81cd05582b6b9ad13ec002c64fa6013 --- /dev/null +++ b/ckpts/universal/global_step80/zero/18.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb70643ff3c14b921689a4af1854a26de591d09389f6f038fc225599c7b0bd73 +size 50332843 diff --git a/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..f37d10422d53f5ed220a650f7c0ca7c5389a9873 --- /dev/null +++ b/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96168e5fdaa384db5d1d07fc220ffd2cb3e1e75825aaff40c7f93e36e3fb1c4e +size 33555612 diff --git a/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..30368bc2538d781249fefc7ec2a7c5c6061b6f54 --- /dev/null +++ b/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e700f0de5aaf5dccf3849fe7dc919146f726ca849542d38d0a3178d31ae5be7 +size 33555627 diff --git a/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..3f1678c5e3cee0081dab62be935f787852eb351e --- /dev/null +++ b/ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:325a36d1f2f17b68ec2848a8c22aaa5dff21c97edad5b59cb3213a60632e0f1c +size 33555533 diff --git a/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d14e855c2a3191f08c720c437d02913938ea7fdb --- /dev/null +++ 
b/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a11f8382a8ddde2efa7fc46bff2043dbee9b4a7a3c3a03a8076529faf38a7044 +size 33555533 diff --git a/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2c39dbbc96d7be3bbf57f90c14214d8c57202a3c --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2152e1d28c10c8f684386e3e8e6044c4ce9b629aa52385e10444d41a5743f8a0 +size 33555612 diff --git a/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4198af653a4d8940ff6c6546f51d389fc97cace7 --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dcff6198ec0feb56dfb12336c56642d4958f81060f956588001452439dcfb56 +size 33555627 diff --git a/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..89a6b27e324bf960e5663bf2b9fc1d381d34d9ad --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2c77b2af1d15cd7a10c3d5a60652075756ef8bae8f49bcf8f29ad94de51797f +size 33555533 diff --git a/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..7a07b2d738ade5472da7fae207aeaadb792d6653 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e48a024c817f04d24f3b92e5554d684e1f9b9c14ef9da38bbd941d83f330bdc +size 33555612 diff --git a/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1557bc92ab66fdf6f63f6e1b1de76ae37fc279a2 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fef3a709d477f61ff776b305a84314b582a7b1e74d5f63e96a0065fde574ba5 +size 33555627 diff --git a/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..cc0a23b3ad23b7f1d986ff20a550fd74129ccd57 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f95c7614eeede8576548db2a6a9df5d3f770c027c118369c635a83493f28ee4 +size 33555533 diff --git a/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 
0000000000000000000000000000000000000000..f2d3762444538b657ace0ce9fd8f9742c72699a9 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca41078f19aca92cf4487922d8c477a3e4224e860143a72ca8c4407c7333db2e +size 33555627 diff --git a/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9c2a74684faf25176305e84a95cf5010bab11b4f --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a7611ba7695d36ade228f5458db2ab1be0060784a72d85d24b0f99f7418ae1 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcdc041d5514bfc93384c65370fbd32fa010aaef Binary files /dev/null and b/venv/lib/python3.10/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc b/venv/lib/python3.10/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a9372864ec215686b6dc171b8e7646c8601b8e8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/__init__.py b/venv/lib/python3.10/site-packages/nvidia/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6ff5f4aea8e016234c33b53f8cc9df8855517a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cublas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc9648605ca030fc3fcc3164ef39e129dc06b4bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56328d3e6576c0716aa66091abb7704ef26d04b9 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h new file mode 100644 index 0000000000000000000000000000000000000000..96eadad8a8e8c3979b99910ceea41ceaf2c8b58e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h @@ -0,0 +1,891 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * This is the public header file for the CUBLAS library, defining the API + * + * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines) + * on top of the CUDA runtime. 
+ */ + +#if !defined(CUBLAS_H_) +#define CUBLAS_H_ + +#if defined(CUBLAS_V2_H_) +#error "It is an error to include both cublas.h and cublas_v2.h" +#endif + +#include <cuda_runtime.h> + +#ifndef CUBLASWINAPI +#ifdef _WIN32 +#define CUBLASWINAPI __stdcall +#else +#define CUBLASWINAPI +#endif +#endif + +#undef CUBLASAPI +#ifdef __CUDACC__ +#define CUBLASAPI __host__ +#else +#define CUBLASAPI +#endif + +#include "cublas_api.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* CUBLAS data types */ +#define cublasStatus cublasStatus_t + +cublasStatus CUBLASWINAPI cublasInit(void); +cublasStatus CUBLASWINAPI cublasShutdown(void); +cublasStatus CUBLASWINAPI cublasGetError(void); + +cublasStatus CUBLASWINAPI cublasGetVersion(int* version); +cublasStatus CUBLASWINAPI cublasAlloc(int n, int elemSize, void** devicePtr); + +cublasStatus CUBLASWINAPI cublasFree(void* devicePtr); + +cublasStatus CUBLASWINAPI cublasSetKernelStream(cudaStream_t stream); + +/* ---------------- CUBLAS BLAS1 functions ---------------- */ +/* NRM2 */ +float CUBLASWINAPI cublasSnrm2(int n, const float* x, int incx); +double CUBLASWINAPI cublasDnrm2(int n, const double* x, int incx); +float CUBLASWINAPI cublasScnrm2(int n, const cuComplex* x, int incx); +double CUBLASWINAPI cublasDznrm2(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* DOT */ +float CUBLASWINAPI cublasSdot(int n, const float* x, int incx, const float* y, int incy); +double CUBLASWINAPI cublasDdot(int n, const double* x, int incx, const double* y, int incy); +cuComplex CUBLASWINAPI cublasCdotu(int n, const cuComplex* x, int incx, const cuComplex* y, int incy); +cuComplex CUBLASWINAPI cublasCdotc(int n, const cuComplex* x, int incx, const cuComplex* y, int incy); +cuDoubleComplex CUBLASWINAPI cublasZdotu(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy); +cuDoubleComplex CUBLASWINAPI cublasZdotc(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* SCAL */ +void CUBLASWINAPI cublasSscal(int n, float alpha, float* x, int incx); +void CUBLASWINAPI cublasDscal(int n, double alpha, double* x, int incx); +void CUBLASWINAPI cublasCscal(int n, cuComplex alpha, cuComplex* x, int incx); +void CUBLASWINAPI cublasZscal(int n, cuDoubleComplex alpha, cuDoubleComplex* x, int incx); + +void CUBLASWINAPI cublasCsscal(int n, float alpha, cuComplex* x, int incx); +void CUBLASWINAPI cublasZdscal(int n, double alpha, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* AXPY */ +void CUBLASWINAPI cublasSaxpy(int n, float alpha, const float* x, int incx, float* y, int incy); +void CUBLASWINAPI cublasDaxpy(int n, double alpha, const double* x, int incx, double* y, int incy); +void CUBLASWINAPI cublasCaxpy(int n, cuComplex alpha, const cuComplex* x, int incx, cuComplex* y, int incy); +void CUBLASWINAPI +cublasZaxpy(int n, cuDoubleComplex alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* COPY */ +void CUBLASWINAPI cublasScopy(int n, const float* x, int incx, float* y, int incy); +void CUBLASWINAPI cublasDcopy(int n, const double* x, int incx, double* y, int incy); +void CUBLASWINAPI cublasCcopy(int n, const cuComplex* x, int incx, cuComplex* y, int incy); +void CUBLASWINAPI cublasZcopy(int n, const cuDoubleComplex* x, int incx, 
cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* SWAP */ +void CUBLASWINAPI cublasSswap(int n, float* x, int incx, float* y, int incy); +void CUBLASWINAPI cublasDswap(int n, double* x, int incx, double* y, int incy); +void CUBLASWINAPI cublasCswap(int n, cuComplex* x, int incx, cuComplex* y, int incy); +void CUBLASWINAPI cublasZswap(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* AMAX */ +int CUBLASWINAPI cublasIsamax(int n, const float* x, int incx); +int CUBLASWINAPI cublasIdamax(int n, const double* x, int incx); +int CUBLASWINAPI cublasIcamax(int n, const cuComplex* x, int incx); +int CUBLASWINAPI cublasIzamax(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* AMIN */ +int CUBLASWINAPI cublasIsamin(int n, const float* x, int incx); +int CUBLASWINAPI cublasIdamin(int n, const double* x, int incx); + +int CUBLASWINAPI cublasIcamin(int n, const cuComplex* x, int incx); +int CUBLASWINAPI cublasIzamin(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* ASUM */ +float CUBLASWINAPI cublasSasum(int n, const float* x, int incx); +double CUBLASWINAPI cublasDasum(int n, const double* x, int incx); +float CUBLASWINAPI cublasScasum(int n, const cuComplex* x, int incx); +double CUBLASWINAPI cublasDzasum(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* ROT */ +void CUBLASWINAPI cublasSrot(int n, float* x, int incx, float* y, int incy, float sc, float ss); +void CUBLASWINAPI cublasDrot(int n, double* x, int incx, double* y, int incy, double sc, double ss); +void CUBLASWINAPI cublasCrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, cuComplex s); +void CUBLASWINAPI +cublasZrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double sc, cuDoubleComplex cs); +void CUBLASWINAPI cublasCsrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, float s); +void CUBLASWINAPI cublasZdrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double c, double s); +/*------------------------------------------------------------------------*/ +/* ROTG */ +void CUBLASWINAPI cublasSrotg(float* sa, float* sb, float* sc, float* ss); +void CUBLASWINAPI cublasDrotg(double* sa, double* sb, double* sc, double* ss); +void CUBLASWINAPI cublasCrotg(cuComplex* ca, cuComplex cb, float* sc, cuComplex* cs); +void CUBLASWINAPI cublasZrotg(cuDoubleComplex* ca, cuDoubleComplex cb, double* sc, cuDoubleComplex* cs); +/*------------------------------------------------------------------------*/ +/* ROTM */ +void CUBLASWINAPI cublasSrotm(int n, float* x, int incx, float* y, int incy, const float* sparam); +void CUBLASWINAPI cublasDrotm(int n, double* x, int incx, double* y, int incy, const double* sparam); +/*------------------------------------------------------------------------*/ +/* ROTMG */ +void CUBLASWINAPI cublasSrotmg(float* sd1, float* sd2, float* sx1, const float* sy1, float* sparam); +void CUBLASWINAPI cublasDrotmg(double* sd1, double* sd2, double* sx1, const double* sy1, double* sparam); + +/* --------------- CUBLAS BLAS2 functions ---------------- */ +/* GEMV */ +void CUBLASWINAPI cublasSgemv(char trans, + int m, + int n, + float alpha, + const float* A, + int lda, + const float* x, 
+ int incx, + float beta, + float* y, + int incy); +void CUBLASWINAPI cublasDgemv(char trans, + int m, + int n, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasCgemv(char trans, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZgemv(char trans, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* GBMV */ +void CUBLASWINAPI cublasSgbmv(char trans, + int m, + int n, + int kl, + int ku, + float alpha, + const float* A, + int lda, + const float* x, + int incx, + float beta, + float* y, + int incy); +void CUBLASWINAPI cublasDgbmv(char trans, + int m, + int n, + int kl, + int ku, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasCgbmv(char trans, + int m, + int n, + int kl, + int ku, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZgbmv(char trans, + int m, + int n, + int kl, + int ku, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* TRMV */ +void CUBLASWINAPI cublasStrmv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx); +void CUBLASWINAPI cublasDtrmv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx); +void CUBLASWINAPI +cublasCtrmv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx); +void CUBLASWINAPI +cublasZtrmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TBMV */ +void CUBLASWINAPI +cublasStbmv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx); +void CUBLASWINAPI +cublasDtbmv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx); +void CUBLASWINAPI +cublasCtbmv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx); +void CUBLASWINAPI cublasZtbmv( + char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TPMV */ +void CUBLASWINAPI cublasStpmv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx); + +void CUBLASWINAPI cublasDtpmv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx); + +void CUBLASWINAPI cublasCtpmv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx); + +void CUBLASWINAPI +cublasZtpmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TRSV */ +void CUBLASWINAPI cublasStrsv(char uplo, char trans, char diag, int n, const float* A, 
int lda, float* x, int incx); + +void CUBLASWINAPI cublasDtrsv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx); + +void CUBLASWINAPI +cublasCtrsv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx); + +void CUBLASWINAPI +cublasZtrsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TPSV */ +void CUBLASWINAPI cublasStpsv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx); + +void CUBLASWINAPI cublasDtpsv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx); + +void CUBLASWINAPI cublasCtpsv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx); + +void CUBLASWINAPI +cublasZtpsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TBSV */ +void CUBLASWINAPI +cublasStbsv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx); + +void CUBLASWINAPI +cublasDtbsv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx); +void CUBLASWINAPI +cublasCtbsv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx); + +void CUBLASWINAPI cublasZtbsv( + char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* SYMV/HEMV */ +void CUBLASWINAPI cublasSsymv( + char uplo, int n, float alpha, const float* A, int lda, const float* x, int incx, float beta, float* y, int incy); +void CUBLASWINAPI cublasDsymv(char uplo, + int n, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasChemv(char uplo, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZhemv(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* SBMV/HBMV */ +void CUBLASWINAPI cublasSsbmv(char uplo, + int n, + int k, + float alpha, + const float* A, + int lda, + const float* x, + int incx, + float beta, + float* y, + int incy); +void CUBLASWINAPI cublasDsbmv(char uplo, + int n, + int k, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasChbmv(char uplo, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZhbmv(char uplo, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* SPMV/HPMV */ +void CUBLASWINAPI +cublasSspmv(char uplo, int n, float alpha, const float* AP, const float* x, int incx, float beta, float* y, int incy); +void CUBLASWINAPI 
cublasDspmv( + char uplo, int n, double alpha, const double* AP, const double* x, int incx, double beta, double* y, int incy); +void CUBLASWINAPI cublasChpmv(char uplo, + int n, + cuComplex alpha, + const cuComplex* AP, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZhpmv(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); + +/*------------------------------------------------------------------------*/ +/* GER */ +void CUBLASWINAPI +cublasSger(int m, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda); +void CUBLASWINAPI +cublasDger(int m, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda); + +void CUBLASWINAPI cublasCgeru( + int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda); +void CUBLASWINAPI cublasCgerc( + int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda); +void CUBLASWINAPI cublasZgeru(int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); +void CUBLASWINAPI cublasZgerc(int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); +/*------------------------------------------------------------------------*/ +/* SYR/HER */ +void CUBLASWINAPI cublasSsyr(char uplo, int n, float alpha, const float* x, int incx, float* A, int lda); +void CUBLASWINAPI cublasDsyr(char uplo, int n, double alpha, const double* x, int incx, double* A, int lda); + +void CUBLASWINAPI cublasCher(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* A, int lda); +void CUBLASWINAPI +cublasZher(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* A, int lda); + +/*------------------------------------------------------------------------*/ +/* SPR/HPR */ +void CUBLASWINAPI cublasSspr(char uplo, int n, float alpha, const float* x, int incx, float* AP); +void CUBLASWINAPI cublasDspr(char uplo, int n, double alpha, const double* x, int incx, double* AP); +void CUBLASWINAPI cublasChpr(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* AP); +void CUBLASWINAPI cublasZhpr(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* AP); +/*------------------------------------------------------------------------*/ +/* SYR2/HER2 */ +void CUBLASWINAPI +cublasSsyr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda); +void CUBLASWINAPI +cublasDsyr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda); +void CUBLASWINAPI cublasCher2(char uplo, + int n, + cuComplex alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); +void CUBLASWINAPI cublasZher2(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +/*------------------------------------------------------------------------*/ +/* SPR2/HPR2 */ +void CUBLASWINAPI +cublasSspr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* 
AP); +void CUBLASWINAPI +cublasDspr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* AP); +void CUBLASWINAPI cublasChpr2( + char uplo, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* AP); +void CUBLASWINAPI cublasZhpr2(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* AP); +/* ------------------------BLAS3 Functions ------------------------------- */ +/* GEMM */ +void CUBLASWINAPI cublasSgemm(char transa, + char transb, + int m, + int n, + int k, + float alpha, + const float* A, + int lda, + const float* B, + int ldb, + float beta, + float* C, + int ldc); +void CUBLASWINAPI cublasDgemm(char transa, + char transb, + int m, + int n, + int k, + double alpha, + const double* A, + int lda, + const double* B, + int ldb, + double beta, + double* C, + int ldc); +void CUBLASWINAPI cublasCgemm(char transa, + char transb, + int m, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); +void CUBLASWINAPI cublasZgemm(char transa, + char transb, + int m, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/* -------------------------------------------------------*/ +/* SYRK */ +void CUBLASWINAPI +cublasSsyrk(char uplo, char trans, int n, int k, float alpha, const float* A, int lda, float beta, float* C, int ldc); +void CUBLASWINAPI cublasDsyrk( + char uplo, char trans, int n, int k, double alpha, const double* A, int lda, double beta, double* C, int ldc); + +void CUBLASWINAPI cublasCsyrk(char uplo, + char trans, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + cuComplex beta, + cuComplex* C, + int ldc); +void CUBLASWINAPI cublasZsyrk(char uplo, + char trans, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/* ------------------------------------------------------- */ +/* HERK */ +void CUBLASWINAPI cublasCherk( + char uplo, char trans, int n, int k, float alpha, const cuComplex* A, int lda, float beta, cuComplex* C, int ldc); +void CUBLASWINAPI cublasZherk(char uplo, + char trans, + int n, + int k, + double alpha, + const cuDoubleComplex* A, + int lda, + double beta, + cuDoubleComplex* C, + int ldc); +/* ------------------------------------------------------- */ +/* SYR2K */ +void CUBLASWINAPI cublasSsyr2k(char uplo, + char trans, + int n, + int k, + float alpha, + const float* A, + int lda, + const float* B, + int ldb, + float beta, + float* C, + int ldc); + +void CUBLASWINAPI cublasDsyr2k(char uplo, + char trans, + int n, + int k, + double alpha, + const double* A, + int lda, + const double* B, + int ldb, + double beta, + double* C, + int ldc); +void CUBLASWINAPI cublasCsyr2k(char uplo, + char trans, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); + +void CUBLASWINAPI cublasZsyr2k(char uplo, + char trans, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/* ------------------------------------------------------- */ +/* HER2K */ +void CUBLASWINAPI 
cublasCher2k(char uplo, + char trans, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + float beta, + cuComplex* C, + int ldc); + +void CUBLASWINAPI cublasZher2k(char uplo, + char trans, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + double beta, + cuDoubleComplex* C, + int ldc); + +/*------------------------------------------------------------------------*/ +/* SYMM*/ +void CUBLASWINAPI cublasSsymm(char side, + char uplo, + int m, + int n, + float alpha, + const float* A, + int lda, + const float* B, + int ldb, + float beta, + float* C, + int ldc); +void CUBLASWINAPI cublasDsymm(char side, + char uplo, + int m, + int n, + double alpha, + const double* A, + int lda, + const double* B, + int ldb, + double beta, + double* C, + int ldc); + +void CUBLASWINAPI cublasCsymm(char side, + char uplo, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); + +void CUBLASWINAPI cublasZsymm(char side, + char uplo, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/*------------------------------------------------------------------------*/ +/* HEMM*/ +void CUBLASWINAPI cublasChemm(char side, + char uplo, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); +void CUBLASWINAPI cublasZhemm(char side, + char uplo, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); + +/*------------------------------------------------------------------------*/ +/* TRSM*/ +void CUBLASWINAPI cublasStrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + float alpha, + const float* A, + int lda, + float* B, + int ldb); + +void CUBLASWINAPI cublasDtrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + double alpha, + const double* A, + int lda, + double* B, + int ldb); + +void CUBLASWINAPI cublasCtrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + cuComplex* B, + int ldb); + +void CUBLASWINAPI cublasZtrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* B, + int ldb); +/*------------------------------------------------------------------------*/ +/* TRMM*/ +void CUBLASWINAPI cublasStrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + float alpha, + const float* A, + int lda, + float* B, + int ldb); +void CUBLASWINAPI cublasDtrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + double alpha, + const double* A, + int lda, + double* B, + int ldb); +void CUBLASWINAPI cublasCtrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + cuComplex* B, + int ldb); +void CUBLASWINAPI cublasZtrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* B, + int ldb); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* 
!defined(CUBLAS_H_) */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe628de2ca547186be8667120131b07d51757d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h @@ -0,0 +1,1815 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ +#pragma once + +#ifndef CUBLASAPI +#ifdef __CUDACC__ +#define CUBLASAPI __host__ __device__ +#else +#define CUBLASAPI +#endif +#endif + +#include <cublas_v2.h> + +#include <stdint.h> +#include <stddef.h> +#include <stdio.h> + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** Opaque structure holding CUBLASLT context + */ +typedef struct cublasLtContext* cublasLtHandle_t; + +cublasStatus_t CUBLASWINAPI cublasLtCreate(cublasLtHandle_t* lightHandle); + +cublasStatus_t CUBLASWINAPI cublasLtDestroy(cublasLtHandle_t lightHandle); + +const char* CUBLASWINAPI cublasLtGetStatusName(cublasStatus_t status); + +const char* CUBLASWINAPI cublasLtGetStatusString(cublasStatus_t status); + +size_t CUBLASWINAPI cublasLtGetVersion(void); + +size_t CUBLASWINAPI cublasLtGetCudartVersion(void); + +cublasStatus_t CUBLASWINAPI cublasLtGetProperty(libraryPropertyType type, int* value); + +cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheGetCapacity(size_t* capacity); +cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheSetCapacity(size_t capacity); + +/** Restricts usage of CPU instructions (ISA) specified by the flags in the mask. + * + * Flags can be combined with bitwise OR(|) operator. Supported flags: + * - 0x1 -- x86-64 AVX512 ISA + * + * Default mask: 0 (any applicable ISA is allowed). + * + * The function returns the previous value of the mask. + * The function takes precedence over the environment variable CUBLASLT_DISABLE_CPU_INSTRUCTIONS_MASK. + */ +unsigned CUBLASWINAPI cublasLtDisableCpuInstructionsSetMask(unsigned mask); + +/** Semi-opaque descriptor for matrix memory layout + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatrixLayoutOpaque_t; + +/** Opaque descriptor for matrix memory layout + */ +typedef cublasLtMatrixLayoutOpaque_t* cublasLtMatrixLayout_t; + +/** Semi-opaque algorithm descriptor (to avoid complicated alloc/free schemes) + * + * This structure can be trivially serialized and later restored for use with the same version of cuBLAS library to save + * on selecting the right configuration again. + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatmulAlgo_t; + +/** Semi-opaque descriptor for cublasLtMatmul() operation details + */ +typedef struct { + uint64_t data[23]; +} cublasLtMatmulDescOpaque_t; + +/** Opaque descriptor for cublasLtMatmul() operation details + */ +typedef cublasLtMatmulDescOpaque_t* cublasLtMatmulDesc_t; + +/** Semi-opaque descriptor for cublasLtMatrixTransform() operation details + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatrixTransformDescOpaque_t; + +/** Opaque descriptor for cublasLtMatrixTransform() operation details + */ +typedef cublasLtMatrixTransformDescOpaque_t* cublasLtMatrixTransformDesc_t; + +/** Semi-opaque descriptor for cublasLtMatmulPreference() operation details + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatmulPreferenceOpaque_t; + +/** Opaque descriptor for cublasLtMatmulAlgoGetHeuristic() configuration + */ +typedef cublasLtMatmulPreferenceOpaque_t* cublasLtMatmulPreference_t; + +/** Tile size (in C/D matrix Rows x Cols) + * + * General order of tile IDs is sorted by size first and by first dimension second. 
+ */ +typedef enum { + CUBLASLT_MATMUL_TILE_UNDEFINED = 0, + CUBLASLT_MATMUL_TILE_8x8 = 1, + CUBLASLT_MATMUL_TILE_8x16 = 2, + CUBLASLT_MATMUL_TILE_16x8 = 3, + CUBLASLT_MATMUL_TILE_8x32 = 4, + CUBLASLT_MATMUL_TILE_16x16 = 5, + CUBLASLT_MATMUL_TILE_32x8 = 6, + CUBLASLT_MATMUL_TILE_8x64 = 7, + CUBLASLT_MATMUL_TILE_16x32 = 8, + CUBLASLT_MATMUL_TILE_32x16 = 9, + CUBLASLT_MATMUL_TILE_64x8 = 10, + CUBLASLT_MATMUL_TILE_32x32 = 11, + CUBLASLT_MATMUL_TILE_32x64 = 12, + CUBLASLT_MATMUL_TILE_64x32 = 13, + CUBLASLT_MATMUL_TILE_32x128 = 14, + CUBLASLT_MATMUL_TILE_64x64 = 15, + CUBLASLT_MATMUL_TILE_128x32 = 16, + CUBLASLT_MATMUL_TILE_64x128 = 17, + CUBLASLT_MATMUL_TILE_128x64 = 18, + CUBLASLT_MATMUL_TILE_64x256 = 19, + CUBLASLT_MATMUL_TILE_128x128 = 20, + CUBLASLT_MATMUL_TILE_256x64 = 21, + CUBLASLT_MATMUL_TILE_64x512 = 22, + CUBLASLT_MATMUL_TILE_128x256 = 23, + CUBLASLT_MATMUL_TILE_256x128 = 24, + CUBLASLT_MATMUL_TILE_512x64 = 25, + CUBLASLT_MATMUL_TILE_64x96 = 26, + CUBLASLT_MATMUL_TILE_96x64 = 27, + CUBLASLT_MATMUL_TILE_96x128 = 28, + CUBLASLT_MATMUL_TILE_128x160 = 29, + CUBLASLT_MATMUL_TILE_160x128 = 30, + CUBLASLT_MATMUL_TILE_192x128 = 31, + CUBLASLT_MATMUL_TILE_128x192 = 32, + CUBLASLT_MATMUL_TILE_128x96 = 33, + CUBLASLT_MATMUL_TILE_32x256 = 34, + CUBLASLT_MATMUL_TILE_256x32 = 35, + CUBLASLT_MATMUL_TILE_END +} cublasLtMatmulTile_t; + +/** Size and number of stages in which elements are read into shared memory + * + * General order of stages IDs is sorted by stage size first and by number of stages second. + */ +typedef enum { + CUBLASLT_MATMUL_STAGES_UNDEFINED = 0, + CUBLASLT_MATMUL_STAGES_16x1 = 1, + CUBLASLT_MATMUL_STAGES_16x2 = 2, + CUBLASLT_MATMUL_STAGES_16x3 = 3, + CUBLASLT_MATMUL_STAGES_16x4 = 4, + CUBLASLT_MATMUL_STAGES_16x5 = 5, + CUBLASLT_MATMUL_STAGES_16x6 = 6, + CUBLASLT_MATMUL_STAGES_32x1 = 7, + CUBLASLT_MATMUL_STAGES_32x2 = 8, + CUBLASLT_MATMUL_STAGES_32x3 = 9, + CUBLASLT_MATMUL_STAGES_32x4 = 10, + CUBLASLT_MATMUL_STAGES_32x5 = 11, + CUBLASLT_MATMUL_STAGES_32x6 = 12, + CUBLASLT_MATMUL_STAGES_64x1 = 13, + CUBLASLT_MATMUL_STAGES_64x2 = 14, + CUBLASLT_MATMUL_STAGES_64x3 = 15, + CUBLASLT_MATMUL_STAGES_64x4 = 16, + CUBLASLT_MATMUL_STAGES_64x5 = 17, + CUBLASLT_MATMUL_STAGES_64x6 = 18, + CUBLASLT_MATMUL_STAGES_128x1 = 19, + CUBLASLT_MATMUL_STAGES_128x2 = 20, + CUBLASLT_MATMUL_STAGES_128x3 = 21, + CUBLASLT_MATMUL_STAGES_128x4 = 22, + CUBLASLT_MATMUL_STAGES_128x5 = 23, + CUBLASLT_MATMUL_STAGES_128x6 = 24, + CUBLASLT_MATMUL_STAGES_32x10 = 25, + CUBLASLT_MATMUL_STAGES_8x4 = 26, + CUBLASLT_MATMUL_STAGES_16x10 = 27, + CUBLASLT_MATMUL_STAGES_8x5 = 28, + CUBLASLT_MATMUL_STAGES_8x3 = 31, + CUBLASLT_MATMUL_STAGES_8xAUTO = 32, + CUBLASLT_MATMUL_STAGES_16xAUTO = 33, + CUBLASLT_MATMUL_STAGES_32xAUTO = 34, + CUBLASLT_MATMUL_STAGES_64xAUTO = 35, + CUBLASLT_MATMUL_STAGES_128xAUTO = 36, + CUBLASLT_MATMUL_STAGES_END +} cublasLtMatmulStages_t; + +/** Thread Block Cluster size + * + * Typically dimensioned similar to cublasLtMatmulTile_t, with the third coordinate unused at this time. 
+ */ +typedef enum { + /** Let library pick cluster shape automatically */ + CUBLASLT_CLUSTER_SHAPE_AUTO = 0, + CUBLASLT_CLUSTER_SHAPE_1x1x1 = 2, + CUBLASLT_CLUSTER_SHAPE_2x1x1 = 3, + CUBLASLT_CLUSTER_SHAPE_4x1x1 = 4, + CUBLASLT_CLUSTER_SHAPE_1x2x1 = 5, + CUBLASLT_CLUSTER_SHAPE_2x2x1 = 6, + CUBLASLT_CLUSTER_SHAPE_4x2x1 = 7, + CUBLASLT_CLUSTER_SHAPE_1x4x1 = 8, + CUBLASLT_CLUSTER_SHAPE_2x4x1 = 9, + CUBLASLT_CLUSTER_SHAPE_4x4x1 = 10, + CUBLASLT_CLUSTER_SHAPE_8x1x1 = 11, + CUBLASLT_CLUSTER_SHAPE_1x8x1 = 12, + CUBLASLT_CLUSTER_SHAPE_8x2x1 = 13, + CUBLASLT_CLUSTER_SHAPE_2x8x1 = 14, + CUBLASLT_CLUSTER_SHAPE_16x1x1 = 15, + CUBLASLT_CLUSTER_SHAPE_1x16x1 = 16, + CUBLASLT_CLUSTER_SHAPE_3x1x1 = 17, + CUBLASLT_CLUSTER_SHAPE_5x1x1 = 18, + CUBLASLT_CLUSTER_SHAPE_6x1x1 = 19, + CUBLASLT_CLUSTER_SHAPE_7x1x1 = 20, + CUBLASLT_CLUSTER_SHAPE_9x1x1 = 21, + CUBLASLT_CLUSTER_SHAPE_10x1x1 = 22, + CUBLASLT_CLUSTER_SHAPE_11x1x1 = 23, + CUBLASLT_CLUSTER_SHAPE_12x1x1 = 24, + CUBLASLT_CLUSTER_SHAPE_13x1x1 = 25, + CUBLASLT_CLUSTER_SHAPE_14x1x1 = 26, + CUBLASLT_CLUSTER_SHAPE_15x1x1 = 27, + CUBLASLT_CLUSTER_SHAPE_3x2x1 = 28, + CUBLASLT_CLUSTER_SHAPE_5x2x1 = 29, + CUBLASLT_CLUSTER_SHAPE_6x2x1 = 30, + CUBLASLT_CLUSTER_SHAPE_7x2x1 = 31, + CUBLASLT_CLUSTER_SHAPE_1x3x1 = 32, + CUBLASLT_CLUSTER_SHAPE_2x3x1 = 33, + CUBLASLT_CLUSTER_SHAPE_3x3x1 = 34, + CUBLASLT_CLUSTER_SHAPE_4x3x1 = 35, + CUBLASLT_CLUSTER_SHAPE_5x3x1 = 36, + CUBLASLT_CLUSTER_SHAPE_3x4x1 = 37, + CUBLASLT_CLUSTER_SHAPE_1x5x1 = 38, + CUBLASLT_CLUSTER_SHAPE_2x5x1 = 39, + CUBLASLT_CLUSTER_SHAPE_3x5x1 = 40, + CUBLASLT_CLUSTER_SHAPE_1x6x1 = 41, + CUBLASLT_CLUSTER_SHAPE_2x6x1 = 42, + CUBLASLT_CLUSTER_SHAPE_1x7x1 = 43, + CUBLASLT_CLUSTER_SHAPE_2x7x1 = 44, + CUBLASLT_CLUSTER_SHAPE_1x9x1 = 45, + CUBLASLT_CLUSTER_SHAPE_1x10x1 = 46, + CUBLASLT_CLUSTER_SHAPE_1x11x1 = 47, + CUBLASLT_CLUSTER_SHAPE_1x12x1 = 48, + CUBLASLT_CLUSTER_SHAPE_1x13x1 = 49, + CUBLASLT_CLUSTER_SHAPE_1x14x1 = 50, + CUBLASLT_CLUSTER_SHAPE_1x15x1 = 51, + CUBLASLT_CLUSTER_SHAPE_END +} cublasLtClusterShape_t; + +/** Inner size of the kernel + * + * Represents various aspects of internal kernel design, that don't impact CUDA grid size but may have other more subtle + * effects. + * + */ +typedef enum { + CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED = 0, + CUBLASLT_MATMUL_INNER_SHAPE_MMA884 = 1, + CUBLASLT_MATMUL_INNER_SHAPE_MMA1684 = 2, + CUBLASLT_MATMUL_INNER_SHAPE_MMA1688 = 3, + CUBLASLT_MATMUL_INNER_SHAPE_MMA16816 = 4, + CUBLASLT_MATMUL_INNER_SHAPE_END +} cublasLtMatmulInnerShape_t; + +/** Pointer mode to use for alpha/beta */ +typedef enum { + /** matches CUBLAS_POINTER_MODE_HOST, pointer targets a single value host memory */ + CUBLASLT_POINTER_MODE_HOST = CUBLAS_POINTER_MODE_HOST, + /** matches CUBLAS_POINTER_MODE_DEVICE, pointer targets a single value device memory */ + CUBLASLT_POINTER_MODE_DEVICE = CUBLAS_POINTER_MODE_DEVICE, + /** pointer targets an array in device memory */ + CUBLASLT_POINTER_MODE_DEVICE_VECTOR = 2, + /** alpha pointer targets an array in device memory, beta is zero. Note: + CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE is not supported, must be 0. */ + CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO = 3, + /** alpha pointer targets an array in device memory, beta is a single value in host memory. 
*/ + CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST = 4, +} cublasLtPointerMode_t; + +/** Mask to define pointer mode capability */ +typedef enum { + /** see CUBLASLT_POINTER_MODE_HOST */ + CUBLASLT_POINTER_MODE_MASK_HOST = 1, + /** see CUBLASLT_POINTER_MODE_DEVICE */ + CUBLASLT_POINTER_MODE_MASK_DEVICE = 2, + /** see CUBLASLT_POINTER_MODE_DEVICE_VECTOR */ + CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR = 4, + /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO */ + CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_ZERO = 8, + /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST */ + CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_HOST = 16, +} cublasLtPointerModeMask_t; + +/** Implementation details that may affect numerical behavior of algorithms. */ +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA (0x01ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_HMMA (0x02ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_IMMA (0x04ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_DMMA (0x08ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_TENSOR_OP_MASK (0xfeull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_TYPE_MASK (0xffull << 0) + +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_16F (0x01ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F (0x02ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_64F (0x04ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32I (0x08ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_TYPE_MASK (0xffull << 8) + +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16F (0x01ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16BF (0x02ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_TF32 (0x04ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F (0x08ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_64F (0x10ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8I (0x20ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E4M3 (0x40ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E5M2 (0x80ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_INPUT_TYPE_MASK (0xffull << 16) + +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN (0x01ull << 32) +typedef uint64_t cublasLtNumericalImplFlags_t; + +/** Execute matrix multiplication (D = alpha * op(A) * op(B) + beta * C). + * + * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized + * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g. 
+ * when workspaceSizeInBytes is less than workspace required by configured + * algo + * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured + * operation + * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device + * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device + * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmul(cublasLtHandle_t lightHandle, + cublasLtMatmulDesc_t computeDesc, + const void* alpha, /* host or device pointer */ + const void* A, + cublasLtMatrixLayout_t Adesc, + const void* B, + cublasLtMatrixLayout_t Bdesc, + const void* beta, /* host or device pointer */ + const void* C, + cublasLtMatrixLayout_t Cdesc, + void* D, + cublasLtMatrixLayout_t Ddesc, + const cublasLtMatmulAlgo_t* algo, + void* workspace, + size_t workspaceSizeInBytes, + cudaStream_t stream); + +/** Matrix layout conversion helper (C = alpha * op(A) + beta * op(B)) + * + * Can be used to change memory order of data or to scale and shift the values. + * + * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized + * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g. + * when A is not NULL, but Adesc is NULL + * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured + * operation + * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device + * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device + * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixTransform(cublasLtHandle_t lightHandle, + cublasLtMatrixTransformDesc_t transformDesc, + const void* alpha, /* host or device pointer */ + const void* A, + cublasLtMatrixLayout_t Adesc, + const void* beta, /* host or device pointer */ + const void* B, + cublasLtMatrixLayout_t Bdesc, + void* C, + cublasLtMatrixLayout_t Cdesc, + cudaStream_t stream); + +/* ---------------------------------------------------------------------------------------*/ +/* Helper functions for cublasLtMatrixLayout_t */ +/* ---------------------------------------------------------------------------------------*/ + +/** Enum for data ordering */ +typedef enum { + /** Column-major + * + * Leading dimension is the stride (in elements) to the beginning of next column in memory. + */ + CUBLASLT_ORDER_COL = 0, + /** Row major + * + * Leading dimension is the stride (in elements) to the beginning of next row in memory. + */ + CUBLASLT_ORDER_ROW = 1, + /** Column-major ordered tiles of 32 columns. + * + * Leading dimension is the stride (in elements) to the beginning of next group of 32-columns. E.g. if matrix has 33 + * columns and 2 rows, ld must be at least (32) * 2 = 64. + */ + CUBLASLT_ORDER_COL32 = 2, + /** Column-major ordered tiles of composite tiles with total 32 columns and 8 rows, tile composed of interleaved + * inner tiles of 4 columns within 4 even or odd rows in an alternating pattern. + * + * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 8 row tile for the next + * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32 * 8) * 1 = 256. 
+   */
+  CUBLASLT_ORDER_COL4_4R2_8C = 3,
+  /** Column-major ordered tiles of composite tiles with total 32 columns and 32 rows.
+   * Element offset within the tile is calculated as (((row%8)/2*4+row/8)*2+row%2)*32+col.
+   *
+   * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 32 row tile for the next
+   * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32*32)*1 = 1024.
+   */
+  CUBLASLT_ORDER_COL32_2R_4R4 = 4,
+
+} cublasLtOrder_t;
+
+/** Attributes of memory layout */
+typedef enum {
+  /** Data type, see cudaDataType.
+   *
+   * uint32_t
+   */
+  CUBLASLT_MATRIX_LAYOUT_TYPE = 0,
+
+  /** Memory order of the data, see cublasLtOrder_t.
+   *
+   * int32_t, default: CUBLASLT_ORDER_COL
+   */
+  CUBLASLT_MATRIX_LAYOUT_ORDER = 1,
+
+  /** Number of rows.
+   *
+   * Usually only values that can be expressed as int32_t are supported.
+   *
+   * uint64_t
+   */
+  CUBLASLT_MATRIX_LAYOUT_ROWS = 2,
+
+  /** Number of columns.
+   *
+   * Usually only values that can be expressed as int32_t are supported.
+   *
+   * uint64_t
+   */
+  CUBLASLT_MATRIX_LAYOUT_COLS = 3,
+
+  /** Matrix leading dimension.
+   *
+   * For CUBLASLT_ORDER_COL this is stride (in elements) of matrix column, for more details and documentation for
+   * other memory orders see documentation for cublasLtOrder_t values.
+   *
+   * Currently only non-negative values are supported, must be large enough so that matrix memory locations are not
+   * overlapping (e.g. greater than or equal to CUBLASLT_MATRIX_LAYOUT_ROWS in case of CUBLASLT_ORDER_COL).
+   *
+   * int64_t
+   */
+  CUBLASLT_MATRIX_LAYOUT_LD = 4,
+
+  /** Number of matmul operations to perform in the batch.
+   *
+   * See also CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT
+   *
+   * int32_t, default: 1
+   */
+  CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT = 5,
+
+  /** Stride (in elements) to the next matrix for strided batch operation.
+   *
+   * When matrix type is planar-complex (CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET != 0), batch stride
+   * is interpreted by cublasLtMatmul() in number of real valued sub-elements. E.g. for data of type CUDA_C_16F,
+   * offset of 1024B is encoded as a stride of value 512 (since each element of the real and imaginary matrices
+   * is a 2B (16bit) floating point type).
+   *
+   * NOTE: A bug in cublasLtMatrixTransform() causes it to interpret the batch stride for a planar-complex matrix
+   * as if it was specified in number of complex elements. Therefore an offset of 1024B must be encoded as stride
+   * value 256 when calling cublasLtMatrixTransform() (each complex element is 4B with real and imaginary values 2B
+   * each). This behavior is expected to be corrected in the next major cuBLAS version.
+   *
+   * int64_t, default: 0
+   */
+  CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET = 6,
+
+  /** Stride (in bytes) to the imaginary plane for planar complex layout.
+   *
+   * int64_t, default: 0 - 0 means that layout is regular (real and imaginary parts of complex numbers are interleaved
+   * in memory in each element)
+   */
+  CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET = 7,
+} cublasLtMatrixLayoutAttribute_t;
+
+/** Internal. Do not use directly.
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutInit_internal(  //
+    cublasLtMatrixLayout_t matLayout,
+    size_t size,
+    cudaDataType type,
+    uint64_t rows,
+    uint64_t cols,
+    int64_t ld);
+
+/** Initialize matrix layout descriptor in pre-allocated space.
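+ *
+ * A minimal sketch (illustrative local names; error handling omitted). Because this
+ * helper takes sizeof(*matLayout), the opaque storage struct behind
+ * cublasLtMatrixLayout_t is complete and declared earlier in this header
+ * (cublasLtMatrixLayoutOpaque_t in current versions):
+ *
+ *     cublasLtMatrixLayoutOpaque_t layoutStorage;     // caller-provided storage
+ *     cublasLtMatrixLayout_t layout = &layoutStorage;
+ *     // 64 x 32 FP32 matrix, column-major, leading dimension 64
+ *     cublasStatus_t st = cublasLtMatrixLayoutInit(layout, CUDA_R_32F, 64, 32, 64);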
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+static inline cublasStatus_t cublasLtMatrixLayoutInit(
+    cublasLtMatrixLayout_t matLayout, cudaDataType type, uint64_t rows, uint64_t cols, int64_t ld) {
+  return cublasLtMatrixLayoutInit_internal(matLayout, sizeof(*matLayout), type, rows, cols, ld);
+}
+
+/** Create new matrix layout descriptor.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutCreate(  //
+    cublasLtMatrixLayout_t* matLayout,
+    cudaDataType type,
+    uint64_t rows,
+    uint64_t cols,
+    int64_t ld);
+
+/** Destroy matrix layout descriptor.
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutDestroy(cublasLtMatrixLayout_t matLayout);
+
+/** Set matrix layout descriptor attribute.
+ *
+ * \param[in]  matLayout    The descriptor
+ * \param[in]  attr         The attribute
+ * \param[in]  buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutSetAttribute(  //
+    cublasLtMatrixLayout_t matLayout,
+    cublasLtMatrixLayoutAttribute_t attr,
+    const void* buf,
+    size_t sizeInBytes);
+
+/** Get matrix layout descriptor attribute.
+ *
+ * \param[in]  matLayout    The descriptor
+ * \param[in]  attr         The attribute
+ * \param[out] buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten  only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ *                          bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ *                                     and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutGetAttribute(  //
+    cublasLtMatrixLayout_t matLayout,
+    cublasLtMatrixLayoutAttribute_t attr,
+    void* buf,
+    size_t sizeInBytes,
+    size_t* sizeWritten);
+
+/* ---------------------------------------------------------------------------------------*/
+/* Helper functions for cublasLtMatmulDesc_t */
+/* ---------------------------------------------------------------------------------------*/
+
+/** Matmul descriptor attributes to define details of the operation. */
+typedef enum {
+  /** Compute type, see cudaDataType. Defines data type used for multiply and accumulate operations and the
+   * accumulator during matrix multiplication.
+   *
+   * int32_t
+   */
+  CUBLASLT_MATMUL_DESC_COMPUTE_TYPE = 0,
+
+  /** Scale type, see cudaDataType. Defines data type of alpha and beta. Accumulator and value from matrix C are
+   * typically converted to scale type before final scaling. Value is then converted from scale type to type of matrix
+   * D before being stored in memory.
+   *
+   * int32_t, default: same as CUBLASLT_MATMUL_DESC_COMPUTE_TYPE
+   */
+  CUBLASLT_MATMUL_DESC_SCALE_TYPE = 1,
+
+  /** Pointer mode of alpha and beta, see cublasLtPointerMode_t. When CUBLASLT_POINTER_MODE_DEVICE_VECTOR is in use,
+   * alpha/beta vector lengths must match number of output matrix rows.
+   *
+   * int32_t, default: CUBLASLT_POINTER_MODE_HOST
+   */
+  CUBLASLT_MATMUL_DESC_POINTER_MODE = 2,
+
+  /** Transform of matrix A, see cublasOperation_t.
+   *
+   * int32_t, default: CUBLAS_OP_N
+   */
+  CUBLASLT_MATMUL_DESC_TRANSA = 3,
+
+  /** Transform of matrix B, see cublasOperation_t.
+   *
+   * int32_t, default: CUBLAS_OP_N
+   */
+  CUBLASLT_MATMUL_DESC_TRANSB = 4,
+
+  /** Transform of matrix C, see cublasOperation_t.
+   *
+   * Currently only CUBLAS_OP_N is supported.
+   *
+   * int32_t, default: CUBLAS_OP_N
+   */
+  CUBLASLT_MATMUL_DESC_TRANSC = 5,
+
+  /** Matrix fill mode, see cublasFillMode_t.
+   *
+   * int32_t, default: CUBLAS_FILL_MODE_FULL
+   */
+  CUBLASLT_MATMUL_DESC_FILL_MODE = 6,
+
+  /** Epilogue function, see cublasLtEpilogue_t.
+   *
+   * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE = 7,
+
+  /** Bias or bias gradient vector pointer in the device memory.
+   *
+   * Bias case. See CUBLASLT_EPILOGUE_BIAS.
+   * For bias data type see CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE.
+   *
+   * Bias vector length must match matrix D rows count.
+   *
+   * Bias gradient case. See CUBLASLT_EPILOGUE_DRELU_BGRAD and CUBLASLT_EPILOGUE_DGELU_BGRAD.
+   * Bias gradient vector elements are the same type as the output elements
+   * (Ctype) with the exception of IMMA kernels (see above).
+   *
+   * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
+   * depend on its value to determine expected pointer alignment.
+   *
+   * Bias case: const void *, default: NULL
+   * Bias gradient case: void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_BIAS_POINTER = 8,
+
+  /** Batch stride for bias or bias gradient vector.
+   *
+   * Used together with CUBLASLT_MATMUL_DESC_BIAS_POINTER when matrix D's CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1.
+   *
+   * int64_t, default: 0
+   */
+  CUBLASLT_MATMUL_DESC_BIAS_BATCH_STRIDE = 10,
+
+  /** Pointer for epilogue auxiliary buffer.
+   *
+   * - Output vector for ReLu bit-mask in forward pass when CUBLASLT_EPILOGUE_RELU_AUX
+   *   or CUBLASLT_EPILOGUE_RELU_AUX_BIAS epilogue is used.
+   * - Input vector for ReLu bit-mask in backward pass when
+   *   CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is used.
+   *
+   * - Output of GELU input matrix in forward pass when
+   *   CUBLASLT_EPILOGUE_GELU_AUX_BIAS epilogue is used.
+   * - Input of GELU input matrix for backward pass when
+   *   CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
+   *
+   * For aux data type see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE.
+   *
+   * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
+   * depend on its value to determine expected pointer alignment.
+   *
+   * Requires setting CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD attribute.
+   *
+   * Forward pass: void *, default: NULL
+   * Backward pass: const void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER = 11,
+
+  /** Leading dimension for epilogue auxiliary buffer.
+   *
+   * - ReLu bit-mask matrix leading dimension in elements (i.e. bits)
+   *   when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
+   *   used. Must be divisible by 128 and be no less than the number of rows in the output matrix.
+   *
+   * - GELU input matrix leading dimension in elements
+   *   when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
+   *   Must be divisible by 8 and be no less than the number of rows in the output matrix.
+   *
+   * int64_t, default: 0
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD = 12,
+
+  /** Batch stride for epilogue auxiliary buffer.
+   *
+   * - ReLu bit-mask matrix batch stride in elements (i.e. bits)
+   *   when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
+   *   used. Must be divisible by 128.
+   *
+   * - GELU input matrix batch stride in elements
+   *   when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
+   *   Must be divisible by 8.
+   *
+   * int64_t, default: 0
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_BATCH_STRIDE = 13,
+
+  /** Batch stride for alpha vector.
+   *
+   * Used together with CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST when matrix D's
+   * CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. If CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO is set then
+   * CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE must be set to 0 as this mode doesn't support a batched alpha
+   * vector.
+   *
+   * int64_t, default: 0
+   */
+  CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE = 14,
+
+  /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs
+   * when user expects a concurrent stream to be using some of the device resources.
+   *
+   * int32_t, default: 0 - use the number reported by the device.
+   */
+  CUBLASLT_MATMUL_DESC_SM_COUNT_TARGET = 15,
+
+  /** Device pointer to the scale factor value that converts data in matrix A to the compute data type range.
+   *
+   * The scaling factor value must have the same type as the compute type.
+   *
+   * If not specified, or set to NULL, the scaling factor is assumed to be 1.
+   *
+   * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
+   * will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * const void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_A_SCALE_POINTER = 17,
+
+  /** Device pointer to the scale factor value to convert data in matrix B to compute data type range.
+   *
+   * The scaling factor value must have the same type as the compute type.
+   *
+   * If not specified, or set to NULL, the scaling factor is assumed to be 1.
+   *
+   * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
+   * will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * const void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_B_SCALE_POINTER = 18,
+
+  /** Device pointer to the scale factor value to convert data in matrix C to compute data type range.
+   *
+   * The scaling factor value must have the same type as the compute type.
+   *
+   * If not specified, or set to NULL, the scaling factor is assumed to be 1.
+   *
+   * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
+   * will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * const void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_C_SCALE_POINTER = 19,
+
+  /** Device pointer to the scale factor value to convert data in matrix D to compute data type range.
+   *
+   * The scaling factor value must have the same type as the compute type.
+   *
+   * If not specified, or set to NULL, the scaling factor is assumed to be 1.
+   *
+   * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
+   * will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * const void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_D_SCALE_POINTER = 20,
+
+  /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
+   * output matrix.
+   *
+   * The computed value has the same type as the compute type.
+   *
+   * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
+   * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_AMAX_D_POINTER = 21,
+
+  /** Type of the data to be stored to the memory pointed to by CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
+   *
+   * If unset, the data type defaults to the type of elements of the output matrix with some exceptions, see details
+   * below.
+   *
+   * ReLu uses a bit-mask.
+   *
+   * GELU input matrix elements type is the same as the type of elements of
+   * the output matrix with some exceptions, see details below.
+   *
+   * For fp8 kernels with output type CUDA_R_8F_E4M3 the aux data type can be CUDA_R_8F_E4M3 or CUDA_R_16F with some
+   * restrictions. See https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmulDescAttributes_t for more details.
+   *
+   * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
+   * will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * int32_t based on cudaDataType, default: -1
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE = 22,
+
+  /** Device pointer to the scaling factor value to convert results from compute type data range to storage
+   * data range in the auxiliary matrix that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
+   *
+   * The scaling factor value must have the same type as the compute type.
+   *
+   * If not specified, or set to NULL, the scaling factor is assumed to be 1. If set for an unsupported matrix data,
+   * scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_SCALE_POINTER = 23,
+
+  /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
+   * buffer that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
+   *
+   * The computed value has the same type as the compute type.
+   *
+   * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
+   * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
+   *
+   * void *, default: NULL
+   */
+  CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_AMAX_POINTER = 24,
+
+  /** Flag for managing fp8 fast accumulation mode.
+   * When enabled, problem execution might be faster but at the cost of lower accuracy because intermediate results
+   * will not periodically be promoted to a higher precision.
+   *
+   * int8_t, default: 0 - fast accumulation mode is disabled.
+   */
+  CUBLASLT_MATMUL_DESC_FAST_ACCUM = 25,
+
+  /** Type of bias or bias gradient vector in the device memory.
+   *
+   * Bias case: see CUBLASLT_EPILOGUE_BIAS.
+   *
+   * Bias vector elements are the same type as the elements of output matrix (Dtype) with the following exceptions:
+   * - IMMA kernels with computeType=CUDA_R_32I and Ctype=CUDA_R_8I where the bias vector elements
+   *   are the same type as alpha, beta (CUBLASLT_MATMUL_DESC_SCALE_TYPE=CUDA_R_32F)
+   * - fp8 kernels with an output type of CUDA_R_32F, CUDA_R_8F_E4M3 or CUDA_R_8F_E5M2, see
+   *   https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmul for details.
+   *
+   * int32_t based on cudaDataType, default: -1
+   */
+  CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE = 26,
+} cublasLtMatmulDescAttributes_t;
+
+/** Internal. Do not use directly.
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulDescInit_internal(  //
+    cublasLtMatmulDesc_t matmulDesc,
+    size_t size,
+    cublasComputeType_t computeType,
+    cudaDataType_t scaleType);
+
+/** Initialize matmul operation descriptor in pre-allocated space.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was initialized successfully
+ */
+static inline cublasStatus_t cublasLtMatmulDescInit(  //
+    cublasLtMatmulDesc_t matmulDesc,
+    cublasComputeType_t computeType,
+    cudaDataType_t scaleType) {
+  return cublasLtMatmulDescInit_internal(matmulDesc, sizeof(*matmulDesc), computeType, scaleType);
+}
+
+/** Create new matmul operation descriptor.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulDescCreate(cublasLtMatmulDesc_t* matmulDesc,
+                                                     cublasComputeType_t computeType,
+                                                     cudaDataType_t scaleType);
+
+/** Destroy matmul operation descriptor.
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulDescDestroy(cublasLtMatmulDesc_t matmulDesc);
+
+/** Set matmul operation descriptor attribute.
+ *
+ * \param[in]  matmulDesc   The descriptor
+ * \param[in]  attr         The attribute
+ * \param[in]  buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulDescSetAttribute(  //
+    cublasLtMatmulDesc_t matmulDesc,
+    cublasLtMatmulDescAttributes_t attr,
+    const void* buf,
+    size_t sizeInBytes);
+
+/** Get matmul operation descriptor attribute.
+ *
+ * \param[in]  matmulDesc   The descriptor
+ * \param[in]  attr         The attribute
+ * \param[out] buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten  only valid when return value is CUBLAS_STATUS_SUCCESS.
+ *                          If sizeInBytes is non-zero: number of bytes actually written,
+ *                          if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ *                                     and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulDescGetAttribute(  //
+    cublasLtMatmulDesc_t matmulDesc,
+    cublasLtMatmulDescAttributes_t attr,
+    void* buf,
+    size_t sizeInBytes,
+    size_t* sizeWritten);
+
+/* ---------------------------------------------------------------------------------------*/
+/* Helper functions for cublasLtMatrixTransformDesc_t */
+/* ---------------------------------------------------------------------------------------*/
+
+/** Matrix transform descriptor attributes to define details of the operation.
+ */
+typedef enum {
+  /** Scale type, see cudaDataType. Inputs are converted to scale type for scaling and summation and results are then
+   * converted to output type to store in memory.
+   *
+   * int32_t
+   */
+  CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE,
+
+  /** Pointer mode of alpha and beta, see cublasLtPointerMode_t.
+   *
+   * int32_t, default: CUBLASLT_POINTER_MODE_HOST
+   */
+  CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE,
+
+  /** Transform of matrix A, see cublasOperation_t.
+   *
+   * int32_t, default: CUBLAS_OP_N
+   */
+  CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA,
+
+  /** Transform of matrix B, see cublasOperation_t.
+   *
+   * int32_t, default: CUBLAS_OP_N
+   */
+  CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSB,
+} cublasLtMatrixTransformDescAttributes_t;
+
+/** Internal. Do not use directly.
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescInit_internal(cublasLtMatrixTransformDesc_t transformDesc,
+                                                                     size_t size,
+                                                                     cudaDataType scaleType);
+
+/** Initialize matrix transform operation descriptor in pre-allocated space.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+static inline cublasStatus_t cublasLtMatrixTransformDescInit(cublasLtMatrixTransformDesc_t transformDesc,
+                                                             cudaDataType scaleType) {
+  return cublasLtMatrixTransformDescInit_internal(transformDesc, sizeof(*transformDesc), scaleType);
+}
+
+/** Create new matrix transform operation descriptor.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescCreate(cublasLtMatrixTransformDesc_t* transformDesc,
+                                                              cudaDataType scaleType);
+
+/** Destroy matrix transform operation descriptor.
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescDestroy(cublasLtMatrixTransformDesc_t transformDesc);
+
+/** Set matrix transform operation descriptor attribute.
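+ *
+ * A minimal sketch of configuring and running a transform (illustrative names
+ * ltHandle, dIn, dOut, inDesc, outDesc are assumed to be created elsewhere;
+ * error handling omitted):
+ *
+ *     cublasLtMatrixTransformDesc_t tdesc;
+ *     cublasLtMatrixTransformDescCreate(&tdesc, CUDA_R_32F);
+ *     cublasOperation_t op = CUBLAS_OP_T;  // transpose the input
+ *     cublasLtMatrixTransformDescSetAttribute(tdesc, CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA, &op, sizeof(op));
+ *     float alpha = 1.0f, beta = 0.0f;
+ *     cublasLtMatrixTransform(ltHandle, tdesc, &alpha, dIn, inDesc, &beta, NULL, NULL, dOut, outDesc, 0);
+ *     cublasLtMatrixTransformDescDestroy(tdesc);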
+ * + * \param[in] transformDesc The descriptor + * \param[in] attr The attribute + * \param[in] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * + * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescSetAttribute( // + cublasLtMatrixTransformDesc_t transformDesc, + cublasLtMatrixTransformDescAttributes_t attr, + const void* buf, + size_t sizeInBytes); + +/** Get matrix transform operation descriptor attribute. + * + * \param[in] transformDesc The descriptor + * \param[in] attr The attribute + * \param[out] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number + * of bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents + * + * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero + * and buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescGetAttribute( // + cublasLtMatrixTransformDesc_t transformDesc, + cublasLtMatrixTransformDescAttributes_t attr, + void* buf, + size_t sizeInBytes, + size_t* sizeWritten); + +/** Reduction scheme for portions of the dot-product calculated in parallel (a. k. a. "split - K"). + */ +typedef enum { + /** No reduction scheme, dot-product shall be performed in one sequence. + */ + CUBLASLT_REDUCTION_SCHEME_NONE = 0, + + /** Reduction is performed "in place" - using the output buffer (and output data type) and counters (in workspace) to + * guarantee the sequentiality. + */ + CUBLASLT_REDUCTION_SCHEME_INPLACE = 1, + + /** Intermediate results are stored in compute type in the workspace and reduced in a separate step. + */ + CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE = 2, + + /** Intermediate results are stored in output type in the workspace and reduced in a separate step. + */ + CUBLASLT_REDUCTION_SCHEME_OUTPUT_TYPE = 4, + + CUBLASLT_REDUCTION_SCHEME_MASK = 0x7, +} cublasLtReductionScheme_t; + +/** Postprocessing options for the epilogue + */ +typedef enum { + /** No special postprocessing, just scale and quantize results if necessary. + */ + CUBLASLT_EPILOGUE_DEFAULT = 1, + + /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)). + */ + CUBLASLT_EPILOGUE_RELU = 2, + + /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)). + * + * This epilogue mode produces an extra output, a ReLu bit-mask matrix, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_RELU_AUX = (CUBLASLT_EPILOGUE_RELU | 128), + + /** Bias, apply (broadcasted) Bias from bias vector. Bias vector length must match matrix D rows, it must be packed + * (stride between vector elements is 1). Bias vector is broadcasted to all columns and added before applying final + * postprocessing. 
+ */ + CUBLASLT_EPILOGUE_BIAS = 4, + + /** ReLu and Bias, apply Bias and then ReLu transform + */ + CUBLASLT_EPILOGUE_RELU_BIAS = (CUBLASLT_EPILOGUE_RELU | CUBLASLT_EPILOGUE_BIAS), + + /** ReLu and Bias, apply Bias and then ReLu transform + * + * This epilogue mode produces an extra output, a ReLu bit-mask matrix, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_RELU_AUX_BIAS = (CUBLASLT_EPILOGUE_RELU_AUX | CUBLASLT_EPILOGUE_BIAS), + + /* ReLu gradient. Apply ReLu gradient to matmul output. Store ReLu gradient in the output matrix. + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DRELU = 8 | 128, + + /* ReLu and Bias gradients. Apply independently ReLu and Bias gradient to + * matmul output. Store ReLu gradient in the output matrix, and Bias gradient + * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DRELU_BGRAD = CUBLASLT_EPILOGUE_DRELU | 16, + + /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)). + */ + CUBLASLT_EPILOGUE_GELU = 32, + + /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)). + * + * This epilogue mode outputs GELU input as a separate matrix (useful for training). + * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_GELU_AUX = (CUBLASLT_EPILOGUE_GELU | 128), + + /** GELU and Bias, apply Bias and then GELU transform + */ + CUBLASLT_EPILOGUE_GELU_BIAS = (CUBLASLT_EPILOGUE_GELU | CUBLASLT_EPILOGUE_BIAS), + + /** GELU and Bias, apply Bias and then GELU transform + * + * This epilogue mode outputs GELU input as a separate matrix (useful for training). + * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_GELU_AUX_BIAS = (CUBLASLT_EPILOGUE_GELU_AUX | CUBLASLT_EPILOGUE_BIAS), + + /* GELU gradient. Apply GELU gradient to matmul output. Store GELU gradient in the output matrix. + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DGELU = 64 | 128, + + /* GELU and Bias gradients. Apply independently GELU and Bias gradient to + * matmul output. Store GELU gradient in the output matrix, and Bias gradient + * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DGELU_BGRAD = CUBLASLT_EPILOGUE_DGELU | 16, + + /** Bias gradient based on the input matrix A. + * + * The bias size corresponds to the number of rows of the matrix D. + * The reduction happens over the GEMM's "k" dimension. + * + * Stores Bias gradient in the auxiliary output + * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). + */ + CUBLASLT_EPILOGUE_BGRADA = 256, + + /** Bias gradient based on the input matrix B. + * + * The bias size corresponds to the number of columns of the matrix D. + * The reduction happens over the GEMM's "k" dimension. + * + * Stores Bias gradient in the auxiliary output + * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). 
+ */ + CUBLASLT_EPILOGUE_BGRADB = 512, +} cublasLtEpilogue_t; + +/** Matmul heuristic search mode + */ +typedef enum { + /** ask heuristics for best algo for given usecase + */ + CUBLASLT_SEARCH_BEST_FIT = 0, + /** only try to find best config for preconfigured algo id + */ + CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID = 1, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_02 = 2, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_03 = 3, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_04 = 4, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_05 = 5, +} cublasLtMatmulSearch_t; + +/** Algo search preference to fine tune the heuristic function. */ +typedef enum { + /** Search mode, see cublasLtMatmulSearch_t. + * + * uint32_t, default: CUBLASLT_SEARCH_BEST_FIT + */ + CUBLASLT_MATMUL_PREF_SEARCH_MODE = 0, + + /** Maximum allowed workspace size in bytes. + * + * uint64_t, default: 0 - no workspace allowed + */ + CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES = 1, + + /** Reduction scheme mask, see cublasLtReductionScheme_t. Filters heuristic result to only include algo configs that + * use one of the required modes. + * + * E.g. mask value of 0x03 will allow only INPLACE and COMPUTE_TYPE reduction schemes. + * + * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_MASK (allows all reduction schemes) + */ + CUBLASLT_MATMUL_PREF_REDUCTION_SCHEME_MASK = 3, + + /** Minimum buffer alignment for matrix A (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix A that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES = 5, + + /** Minimum buffer alignment for matrix B (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix B that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES = 6, + + /** Minimum buffer alignment for matrix C (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix C that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES = 7, + + /** Minimum buffer alignment for matrix D (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix D that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_D_BYTES = 8, + + /** Maximum wave count. + * + * See cublasLtMatmulHeuristicResult_t::wavesCount. + * + * Selecting a non-zero value will exclude algorithms that report device utilization higher than specified. + * + * float, default: 0.0f + */ + CUBLASLT_MATMUL_PREF_MAX_WAVES_COUNT = 9, + + /** Numerical implementation details mask, see cublasLtNumericalImplFlags_t. Filters heuristic result to only include + * algorithms that use the allowed implementations. + * + * uint64_t, default: uint64_t(-1) (allow everything) + */ + CUBLASLT_MATMUL_PREF_IMPL_MASK = 12, +} cublasLtMatmulPreferenceAttributes_t; + +/** Internal. Do not use directly. + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceInit_internal(cublasLtMatmulPreference_t pref, size_t size); + +/** Initialize matmul heuristic search preference descriptor in pre-allocated space. 
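+ *
+ * A typical setup sketch used together with the heuristic query below
+ * (heap-allocated variant shown; error handling omitted):
+ *
+ *     cublasLtMatmulPreference_t pref;
+ *     cublasLtMatmulPreferenceCreate(&pref);
+ *     uint64_t workspaceSize = 32 * 1024 * 1024;  // allow up to 32 MiB of workspace
+ *     cublasLtMatmulPreferenceSetAttribute(pref, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
+ *                                          &workspaceSize, sizeof(workspaceSize));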
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+static inline cublasStatus_t cublasLtMatmulPreferenceInit(cublasLtMatmulPreference_t pref) {
+  return cublasLtMatmulPreferenceInit_internal(pref, sizeof(*pref));
+}
+
+/** Create new matmul heuristic search preference descriptor.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceCreate(cublasLtMatmulPreference_t* pref);
+
+/** Destroy matmul heuristic search preference descriptor.
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceDestroy(cublasLtMatmulPreference_t pref);
+
+/** Set matmul heuristic search preference descriptor attribute.
+ *
+ * \param[in]  pref         The descriptor
+ * \param[in]  attr         The attribute
+ * \param[in]  buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceSetAttribute(  //
+    cublasLtMatmulPreference_t pref,
+    cublasLtMatmulPreferenceAttributes_t attr,
+    const void* buf,
+    size_t sizeInBytes);
+
+/** Get matmul heuristic search preference descriptor attribute.
+ *
+ * \param[in]  pref         The descriptor
+ * \param[in]  attr         The attribute
+ * \param[out] buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten  only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ *                          bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ *                                     and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceGetAttribute(  //
+    cublasLtMatmulPreference_t pref,
+    cublasLtMatmulPreferenceAttributes_t attr,
+    void* buf,
+    size_t sizeInBytes,
+    size_t* sizeWritten);
+
+/** Results structure used by cublasLtMatmulAlgoGetHeuristic().
+ *
+ * Holds returned configured algo descriptor and its runtime properties.
+ */
+typedef struct {
+  /** Matmul algorithm descriptor.
+   *
+   * Must be initialized with cublasLtMatmulAlgoInit() if preferences' CUBLASLT_MATMUL_PREF_SEARCH_MODE is set to
+   * CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID
+   */
+  cublasLtMatmulAlgo_t algo;
+
+  /** Actual size of workspace memory required.
+   */
+  size_t workspaceSize;
+
+  /** Result status. Other fields are only valid if, after a call to cublasLtMatmulAlgoGetHeuristic(), this member is
+   * set to CUBLAS_STATUS_SUCCESS.
+   */
+  cublasStatus_t state;
+
+  /** Waves count - a device utilization metric.
+   *
+   * wavesCount value of 1.0f suggests that when kernel is launched it will fully occupy the GPU.
+   */
+  float wavesCount;
+
+  int reserved[4];
+} cublasLtMatmulHeuristicResult_t;
+
+/** Query cublasLt heuristic for algorithm appropriate for given use case.
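+ *
+ * A minimal end-to-end sketch built only from the calls declared in this header
+ * (layouts Adesc/Bdesc/Cdesc/Ddesc, preference pref, device buffers dA/dB/dC/dD,
+ * workspace, stream, alpha and beta are assumed to be set up as described above;
+ * error handling omitted):
+ *
+ *     cublasLtMatmulDesc_t matmulDesc;
+ *     cublasLtMatmulDescCreate(&matmulDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F);
+ *     cublasOperation_t transa = CUBLAS_OP_T;
+ *     cublasLtMatmulDescSetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa));
+ *
+ *     cublasLtMatmulHeuristicResult_t result;
+ *     int returnedResults = 0;
+ *     cublasLtMatmulAlgoGetHeuristic(ltHandle, matmulDesc, Adesc, Bdesc, Cdesc, Ddesc,
+ *                                    pref, 1, &result, &returnedResults);
+ *     if (returnedResults > 0 && result.state == CUBLAS_STATUS_SUCCESS) {
+ *       cublasLtMatmul(ltHandle, matmulDesc, &alpha, dA, Adesc, dB, Bdesc, &beta,
+ *                      dC, Cdesc, dD, Ddesc, &result.algo, workspace,
+ *                      result.workspaceSize, stream);
+ *     }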
+ *
+ * \param[in]  lightHandle            Pointer to the allocated cuBLASLt handle for the cuBLASLt
+ *                                    context. See cublasLtHandle_t.
+ * \param[in]  operationDesc          Handle to the matrix multiplication descriptor.
+ * \param[in]  Adesc                  Handle to the layout descriptors for matrix A.
+ * \param[in]  Bdesc                  Handle to the layout descriptors for matrix B.
+ * \param[in]  Cdesc                  Handle to the layout descriptors for matrix C.
+ * \param[in]  Ddesc                  Handle to the layout descriptors for matrix D.
+ * \param[in]  preference             Pointer to the structure holding the heuristic search
+ *                                    preferences descriptor. See cublasLtMatmulPreference_t.
+ * \param[in]  requestedAlgoCount     Size of heuristicResultsArray (in elements) and requested
+ *                                    maximum number of algorithms to return.
+ * \param[in, out] heuristicResultsArray  Output algorithms and associated runtime characteristics,
+ *                                    ordered in increasing estimated compute time.
+ * \param[out] returnAlgoCount        The number of heuristicResultsArray elements written.
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less than or equal to zero
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if no heuristic function available for current configuration
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect
+ *                               heuristicResultsArray[0 to (returnAlgoCount - 1)].state
+ *                               for detail status of results
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetHeuristic(cublasLtHandle_t lightHandle,
+                                                           cublasLtMatmulDesc_t operationDesc,
+                                                           cublasLtMatrixLayout_t Adesc,
+                                                           cublasLtMatrixLayout_t Bdesc,
+                                                           cublasLtMatrixLayout_t Cdesc,
+                                                           cublasLtMatrixLayout_t Ddesc,
+                                                           cublasLtMatmulPreference_t preference,
+                                                           int requestedAlgoCount,
+                                                           cublasLtMatmulHeuristicResult_t heuristicResultsArray[],
+                                                           int* returnAlgoCount);
+
+/* ---------------------------------------------------------------------------------------*/
+/* Lower level API to be able to implement own Heuristic and Find routines */
+/* ---------------------------------------------------------------------------------------*/
+
+/** Routine to get all algo IDs that can potentially run.
+ *
+ * \param[in]  requestedAlgoCount  requested number of algos (must be less than or equal to the size of algoIdsArray
+ *                                 (in elements))
+ * \param[out] algoIdsArray        array to write algo IDs to
+ * \param[out] returnAlgoCount     number of algo IDs actually written
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less than or equal to zero
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect returnAlgoCount to get actual number of IDs
+ *                               available
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetIds(cublasLtHandle_t lightHandle,
+                                                     cublasComputeType_t computeType,
+                                                     cudaDataType_t scaleType,
+                                                     cudaDataType_t Atype,
+                                                     cudaDataType_t Btype,
+                                                     cudaDataType_t Ctype,
+                                                     cudaDataType_t Dtype,
+                                                     int requestedAlgoCount,
+                                                     int algoIdsArray[],
+                                                     int* returnAlgoCount);
+
+/** Initialize algo structure
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if algo is NULL or algoId is outside of recognized range
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algoId is not supported for given combination of data types
+ * \retval CUBLAS_STATUS_SUCCESS if the structure was successfully initialized
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoInit(cublasLtHandle_t lightHandle,
+                                                   cublasComputeType_t computeType,
+                                                   cudaDataType_t scaleType,
+                                                   cudaDataType_t Atype,
+                                                   cudaDataType_t Btype,
+                                                   cudaDataType_t Ctype,
+                                                   cudaDataType_t Dtype,
+                                                   int algoId,
+                                                   cublasLtMatmulAlgo_t* algo);
+
+/** Check configured algo descriptor for correctness and support on current device.
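+ *
+ * A short sketch (algo initialized via cublasLtMatmulAlgoInit() above; descriptor
+ * names as in the previous examples; error handling omitted):
+ *
+ *     cublasLtMatmulHeuristicResult_t checkResult;
+ *     if (cublasLtMatmulAlgoCheck(ltHandle, matmulDesc, Adesc, Bdesc, Cdesc, Ddesc,
+ *                                 &algo, &checkResult) == CUBLAS_STATUS_SUCCESS) {
+ *       // checkResult.workspaceSize now holds the workspace this configuration requires
+ *     }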
+ *
+ * Result includes required workspace size and calculated wave count.
+ *
+ * CUBLAS_STATUS_SUCCESS doesn't fully guarantee algo will run (will fail if e.g. buffers are not correctly aligned);
+ * but if cublasLtMatmulAlgoCheck fails, the algo will not run.
+ *
+ * \param[in]  algo    algo configuration to check
+ * \param[out] result  result structure to report algo runtime characteristics; algo field is never updated
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if matrix layout descriptors or operation descriptor don't match algo
+ *                                     descriptor
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algo configuration or data type combination is not currently supported on
+ *                                     given device
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if algo configuration cannot be run using the selected device
+ * \retval CUBLAS_STATUS_SUCCESS if check was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCheck(  //
+    cublasLtHandle_t lightHandle,
+    cublasLtMatmulDesc_t operationDesc,
+    cublasLtMatrixLayout_t Adesc,
+    cublasLtMatrixLayout_t Bdesc,
+    cublasLtMatrixLayout_t Cdesc,
+    cublasLtMatrixLayout_t Ddesc,
+    const cublasLtMatmulAlgo_t* algo,  ///< may point to result->algo
+    cublasLtMatmulHeuristicResult_t* result);
+
+/** Capabilities Attributes that can be retrieved from an initialized Algo structure
+ */
+typedef enum {
+  /** support for split K, see CUBLASLT_ALGO_CONFIG_SPLITK_NUM
+   *
+   * int32_t, 0 means no support, supported otherwise
+   */
+  CUBLASLT_ALGO_CAP_SPLITK_SUPPORT = 0,
+
+  /** reduction scheme mask, see cublasLtReductionScheme_t; shows supported reduction schemes, if reduction scheme is
+   * not masked out it is supported.
+   *
+   * e.g. int isReductionSchemeComputeTypeSupported = (reductionSchemeMask & CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ==
+   *      CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE ?
+   *      1 : 0;
+   *
+   * uint32_t
+   */
+  CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK = 1,
+
+  /** support for cta swizzling, see CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING
+   *
+   * uint32_t, 0 means no support, 1 means supported value of 1, other values are reserved
+   */
+  CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT = 2,
+
+  /** support strided batch
+   *
+   * int32_t, 0 means no support, supported otherwise
+   */
+  CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT = 3,
+
+  /** support results out of place (D != C in D = alpha.A.B + beta.C)
+   *
+   * int32_t, 0 means no support, supported otherwise
+   */
+  CUBLASLT_ALGO_CAP_OUT_OF_PLACE_RESULT_SUPPORT = 4,
+
+  /** syrk/herk support (on top of regular gemm)
+   *
+   * int32_t, 0 means no support, supported otherwise
+   */
+  CUBLASLT_ALGO_CAP_UPLO_SUPPORT = 5,
+
+  /** tile ids possible to use, see cublasLtMatmulTile_t; if no tile ids are supported use
+   * CUBLASLT_MATMUL_TILE_UNDEFINED
+   *
+   * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
+   *
+   * array of uint32_t
+   */
+  CUBLASLT_ALGO_CAP_TILE_IDS = 6,
+
+  /** custom option range is from 0 to CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX (inclusive), see
+   * CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION
+   *
+   * int32_t
+   */
+  CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX = 7,
+
+  /** whether algorithm supports custom (not COL or ROW memory order), see cublasLtOrder_t
+   *
+   * int32_t, 0 means only COL and ROW memory order is allowed, non-zero means that algo might have different
+   * requirements
+   */
+  CUBLASLT_ALGO_CAP_CUSTOM_MEMORY_ORDER = 10,
+
+  /** bitmask enumerating pointer modes algorithm supports
+   *
+   * uint32_t, see cublasLtPointerModeMask_t
+   */
+  CUBLASLT_ALGO_CAP_POINTER_MODE_MASK = 11,
+
+  /** bitmask enumerating kinds of postprocessing algorithm supports in the epilogue
+   *
+   * uint32_t, see cublasLtEpilogue_t
+   */
+  CUBLASLT_ALGO_CAP_EPILOGUE_MASK = 12,
+
+  /** stages ids possible to use, see cublasLtMatmulStages_t; if no stages ids are supported use
+   * CUBLASLT_MATMUL_STAGES_UNDEFINED
+   *
+   * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
+   *
+   * array of uint32_t
+   */
+  CUBLASLT_ALGO_CAP_STAGES_IDS = 13,
+
+  /** support for negative ld for all of the matrices
+   *
+   * int32_t, 0 means no support, supported otherwise
+   */
+  CUBLASLT_ALGO_CAP_LD_NEGATIVE = 14,
+
+  /** details about algorithm's implementation that affect its numerical behavior
+   *
+   * uint64_t, see cublasLtNumericalImplFlags_t
+   */
+  CUBLASLT_ALGO_CAP_NUMERICAL_IMPL_FLAGS = 15,
+
+  /** minimum alignment required for A matrix in bytes
+   * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+   *
+   * uint32_t
+   */
+  CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_A_BYTES = 16,
+
+  /** minimum alignment required for B matrix in bytes
+   * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+   *
+   * uint32_t
+   */
+  CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_B_BYTES = 17,
+
+  /** minimum alignment required for C matrix in bytes
+   * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+   *
+   * uint32_t
+   */
+  CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_C_BYTES = 18,
+
+  /** minimum alignment required for D matrix in bytes
+   * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+   *
+   * uint32_t
+   */
+  CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_D_BYTES = 19,
+} cublasLtMatmulAlgoCapAttributes_t;
+
+/** Get algo capability attribute.
+ *
+ * E.g.
+ *   to get list of supported Tile IDs:
+ *
+ *     cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END];
+ *     size_t num_tiles, size_written;
+ *     if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles, sizeof(tiles), &size_written) ==
+ *         CUBLAS_STATUS_SUCCESS) {
+ *       num_tiles = size_written / sizeof(tiles[0]);
+ *     }
+ *
+ * \param[in]  algo         The algo descriptor
+ * \param[in]  attr         The attribute
+ * \param[out] buf          memory address containing the new value
+ * \param[in]  sizeInBytes  size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten  only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ *                          bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ *                                     and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ *                                     selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCapGetAttribute(const cublasLtMatmulAlgo_t* algo,
+                                                              cublasLtMatmulAlgoCapAttributes_t attr,
+                                                              void* buf,
+                                                              size_t sizeInBytes,
+                                                              size_t* sizeWritten);
+
+/** Algo Configuration Attributes that can be set according to the Algo capabilities
+ */
+typedef enum {
+  /** algorithm index, see cublasLtMatmulAlgoGetIds()
+   *
+   * readonly, set by cublasLtMatmulAlgoInit()
+   * int32_t
+   */
+  CUBLASLT_ALGO_CONFIG_ID = 0,
+  /** tile id, see cublasLtMatmulTile_t
+   *
+   * uint32_t, default: CUBLASLT_MATMUL_TILE_UNDEFINED
+   */
+  CUBLASLT_ALGO_CONFIG_TILE_ID = 1,
+  /** Number of K splits. If the number of K splits is greater than one, SPLITK_NUM parts
+   * of matrix multiplication will be computed in parallel. The results will be accumulated
+   * according to CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME
+   *
+   * int32_t, default: 1
+   */
+  CUBLASLT_ALGO_CONFIG_SPLITK_NUM = 2,
+  /** reduction scheme, see cublasLtReductionScheme_t
+   *
+   * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_NONE
+   */
+  CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME = 3,
+  /** cta swizzling, change mapping from CUDA grid coordinates to parts of the matrices
+   *
+   * possible values: 0, 1, other values reserved
+   *
+   * uint32_t, default: 0
+   */
+  CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING = 4,
+  /** custom option, each algorithm can support some custom options that don't fit description of the other config
+   * attributes, see CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX to get accepted range for any specific case
+   *
+   * uint32_t, default: 0
+   */
+  CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION = 5,
+  /** stages id, see cublasLtMatmulStages_t
+   *
+   * uint32_t, default: CUBLASLT_MATMUL_STAGES_UNDEFINED
+   */
+  CUBLASLT_ALGO_CONFIG_STAGES_ID = 6,
+  /** inner shape id, see cublasLtMatmulInnerShape_t
+   *
+   * uint16_t, default: 0 (CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED)
+   */
+  CUBLASLT_ALGO_CONFIG_INNER_SHAPE_ID = 7,
+  /** Thread Block Cluster shape id, see cublasLtClusterShape_t. Defines cluster size to use.
+   *
+   * uint16_t, default: 0 (CUBLASLT_CLUSTER_SHAPE_AUTO)
+   */
+  CUBLASLT_ALGO_CONFIG_CLUSTER_SHAPE_ID = 8,
+} cublasLtMatmulAlgoConfigAttributes_t;
+
+/** Set algo configuration attribute.
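+ *
+ * E.g. to split the k dimension four ways and reduce the partial results in
+ * compute type (attribute value types as documented above; error handling
+ * omitted):
+ *
+ *     int32_t splitK = 4;
+ *     cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitK, sizeof(splitK));
+ *     uint32_t reduction = CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE;
+ *     cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &reduction, sizeof(reduction));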
+ * + * \param[in] algo The algo descriptor + * \param[in] attr The attribute + * \param[in] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * + * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigSetAttribute(cublasLtMatmulAlgo_t* algo, + cublasLtMatmulAlgoConfigAttributes_t attr, + const void* buf, + size_t sizeInBytes); + +/** Get algo configuration attribute. + * + * \param[in] algo The algo descriptor + * \param[in] attr The attribute + * \param[out] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of + * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents + * + * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero + * and buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigGetAttribute(const cublasLtMatmulAlgo_t* algo, + cublasLtMatmulAlgoConfigAttributes_t attr, + void* buf, + size_t sizeInBytes, + size_t* sizeWritten); + +/** Experimental: Logger callback type. + */ +typedef void (*cublasLtLoggerCallback_t)(int logLevel, const char* functionName, const char* message); + +/** Experimental: Logger callback setter. + * + * \param[in] callback a user defined callback function to be called by the logger + * + * \retval CUBLAS_STATUS_SUCCESS if callback was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtLoggerSetCallback(cublasLtLoggerCallback_t callback); + +/** Experimental: Log file setter. + * + * \param[in] file an open file with write permissions + * + * \retval CUBLAS_STATUS_SUCCESS if log file was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtLoggerSetFile(FILE* file); + +/** Experimental: Open log file. + * + * \param[in] logFile log file path. if the log file does not exist, it will be created + * + * \retval CUBLAS_STATUS_SUCCESS if log file was created successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtLoggerOpenFile(const char* logFile); + +/** Experimental: Log level setter. + * + * \param[in] level log level, should be one of the following: + * 0. Off + * 1. Errors + * 2. Performance Trace + * 3. Performance Hints + * 4. Heuristics Trace + * 5. API Trace + * + * \retval CUBLAS_STATUS_INVALID_VALUE if log level is not one of the above levels + * + * \retval CUBLAS_STATUS_SUCCESS if log level was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtLoggerSetLevel(int level); + +/** Experimental: Log mask setter. + * + * \param[in] mask log mask, should be a combination of the following masks: + * 0. Off + * 1. Errors + * 2. Performance Trace + * 4. Performance Hints + * 8. Heuristics Trace + * 16. API Trace + * + * \retval CUBLAS_STATUS_SUCCESS if log mask was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtLoggerSetMask(int mask); + +/** Experimental: Disable logging for the entire session. 
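+ *
+ * A short sketch of typical logger use (levels as documented above):
+ *
+ *     cublasLtLoggerSetLevel(1);     // report errors only
+ *     // ... run cublasLt calls ...
+ *     cublasLtLoggerForceDisable();  // logging stays off for the rest of the session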
+ * + * \retval CUBLAS_STATUS_SUCCESS if disabled logging + */ +cublasStatus_t CUBLASWINAPI cublasLtLoggerForceDisable(); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h new file mode 100644 index 0000000000000000000000000000000000000000..fe0e6f99b952514874c45208e751f5330e71570c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h @@ -0,0 +1,693 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */
+
+/* cublasXt : Host API, Out of Core and Multi-GPU BLAS Library
+
+*/
+
+#if !defined(CUBLAS_XT_H_)
+#define CUBLAS_XT_H_
+
+#include "driver_types.h"
+#include "cuComplex.h" /* import complex data type */
+
+#include "cublas_v2.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* __cplusplus */
+
+struct cublasXtContext;
+typedef struct cublasXtContext* cublasXtHandle_t;
+
+cublasStatus_t CUBLASWINAPI cublasXtCreate(cublasXtHandle_t* handle);
+cublasStatus_t CUBLASWINAPI cublasXtDestroy(cublasXtHandle_t handle);
+cublasStatus_t CUBLASWINAPI cublasXtGetNumBoards(int nbDevices, int deviceId[], int* nbBoards);
+cublasStatus_t CUBLASWINAPI cublasXtMaxBoards(int* nbGpuBoards);
+/* This routine selects the GPUs that the user wants to use for CUBLAS-XT */
+cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
+
+/* This routine allows changing the dimension of the tiles (blockDim x blockDim) */
+cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
+cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
+
+typedef enum { CUBLASXT_PINNING_DISABLED = 0, CUBLASXT_PINNING_ENABLED = 1 } cublasXtPinnedMemMode_t;
+/* This routine allows CUBLAS-XT to pin the host memory if it finds out that some of the matrices passed
+   are not pinned. Pinning/unpinning the host memory is still a costly operation;
+   it is better if the user controls the memory on their own (by pinning/unpinning only when necessary).
+*/
+cublasStatus_t CUBLASWINAPI cublasXtGetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t* mode);
+cublasStatus_t CUBLASWINAPI cublasXtSetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t mode);
+
+/* These routines provide CPU BLAS routines, used for sizes that are too small for the GPU or for hybrid computation */
+typedef enum {
+  CUBLASXT_FLOAT = 0,
+  CUBLASXT_DOUBLE = 1,
+  CUBLASXT_COMPLEX = 2,
+  CUBLASXT_DOUBLECOMPLEX = 3,
+} cublasXtOpType_t;
+
+typedef enum {
+  CUBLASXT_GEMM = 0,
+  CUBLASXT_SYRK = 1,
+  CUBLASXT_HERK = 2,
+  CUBLASXT_SYMM = 3,
+  CUBLASXT_HEMM = 4,
+  CUBLASXT_TRSM = 5,
+  CUBLASXT_SYR2K = 6,
+  CUBLASXT_HER2K = 7,
+
+  CUBLASXT_SPMM = 8,
+  CUBLASXT_SYRKX = 9,
+  CUBLASXT_HERKX = 10,
+  CUBLASXT_TRMM = 11,
+  CUBLASXT_ROUTINE_MAX = 12,
+} cublasXtBlasOp_t;
+
+/* Currently only 32-bit integer BLAS routines are supported */
+cublasStatus_t CUBLASWINAPI cublasXtSetCpuRoutine(cublasXtHandle_t handle,
+                                                  cublasXtBlasOp_t blasOp,
+                                                  cublasXtOpType_t type,
+                                                  void* blasFunctor);
+
+/* Specifies the percentage of work that should be done by the CPU; default is 0 (no work) */
+cublasStatus_t CUBLASWINAPI cublasXtSetCpuRatio(cublasXtHandle_t handle,
+                                                cublasXtBlasOp_t blasOp,
+                                                cublasXtOpType_t type,
+                                                float ratio);
+
+/* GEMM */
+cublasStatus_t CUBLASWINAPI cublasXtSgemm(cublasXtHandle_t handle,
+                                          cublasOperation_t transa,
+                                          cublasOperation_t transb,
+                                          size_t m,
+                                          size_t n,
+                                          size_t k,
+                                          const float* alpha,
+                                          const float* A,
+                                          size_t lda,
+                                          const float* B,
+                                          size_t ldb,
+                                          const float* beta,
+                                          float* C,
+                                          size_t ldc);
+
+cublasStatus_t CUBLASWINAPI cublasXtDgemm(cublasXtHandle_t handle,
+                                          cublasOperation_t transa,
+                                          cublasOperation_t transb,
+                                          size_t m,
+                                          size_t n,
+                                          size_t k,
+                                          const double* alpha,
+                                          const double* A,
+                                          size_t lda,
+                                          const double* B,
+                                          size_t ldb,
+                                          const double* beta,
+                                          double* C,
+                                          size_t ldc);
+
+cublasStatus_t CUBLASWINAPI cublasXtCgemm(cublasXtHandle_t handle,
+                                          cublasOperation_t transa,
+                                          cublasOperation_t transb,
+                                          size_t m,
+                                          size_t n,
+                                          size_t
k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZgemm(cublasXtHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + size_t m, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* ------------------------------------------------------- */ +/* SYRK */ +cublasStatus_t CUBLASWINAPI cublasXtSsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const float* A, + size_t lda, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const double* A, + size_t lda, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* HERK */ +cublasStatus_t CUBLASWINAPI cublasXtCherk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const cuComplex* A, + size_t lda, + const float* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZherk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const cuDoubleComplex* A, + size_t lda, + const double* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* SYR2K */ +cublasStatus_t CUBLASWINAPI cublasXtSsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* 
-------------------------------------------------------------------- */ +/* HERKX : variant extension of HERK */ +cublasStatus_t CUBLASWINAPI cublasXtCherkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const float* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZherkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const double* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* TRSM */ +cublasStatus_t CUBLASWINAPI cublasXtStrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const float* alpha, + const float* A, + size_t lda, + float* B, + size_t ldb); + +cublasStatus_t CUBLASWINAPI cublasXtDtrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const double* alpha, + const double* A, + size_t lda, + double* B, + size_t ldb); + +cublasStatus_t CUBLASWINAPI cublasXtCtrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + cuComplex* B, + size_t ldb); + +cublasStatus_t CUBLASWINAPI cublasXtZtrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + cuDoubleComplex* B, + size_t ldb); +/* -------------------------------------------------------------------- */ +/* SYMM : Symmetric Multiply Matrix*/ +cublasStatus_t CUBLASWINAPI cublasXtSsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* HEMM : Hermitian Matrix Multiply */ +cublasStatus_t CUBLASWINAPI cublasXtChemm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuComplex* alpha, 
+ const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZhemm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* SYRKX : variant extension of SYRK */ +cublasStatus_t CUBLASWINAPI cublasXtSsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* HER2K : variant extension of HERK */ +cublasStatus_t CUBLASWINAPI cublasXtCher2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const float* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZher2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const double* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* SPMM : Symmetric Packed Multiply Matrix*/ +cublasStatus_t CUBLASWINAPI cublasXtSspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const float* alpha, + const float* AP, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const double* alpha, + const double* AP, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* AP, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + 
cublasFillMode_t uplo, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* TRMM */ +cublasStatus_t CUBLASWINAPI cublasXtStrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDtrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCtrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZtrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + cuDoubleComplex* C, + size_t ldc); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* !defined(CUBLAS_XT_H_) */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a8c3d8cddea009fa826689e053126ddc6dbd5b1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h @@ -0,0 +1,5725 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * This is the public header file for the CUBLAS library, defining the API + * + * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines) + * on top of the CUDA runtime. + */ + +#if !defined(CUBLAS_API_H_) +#define CUBLAS_API_H_ + +#ifndef CUBLASWINAPI +#ifdef _WIN32 +#define CUBLASWINAPI __stdcall +#else +#define CUBLASWINAPI +#endif +#endif + +#ifndef CUBLASAPI +#error "This file should not be included without defining CUBLASAPI" +#endif + +#include <cuda_runtime.h> + +#include "driver_types.h" +#include "cuComplex.h" /* import complex data type */ + +#include <cuda_fp16.h> +#include <cuda_bf16.h> + +#include <library_types.h> + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +#define CUBLAS_VER_MAJOR 12 +#define CUBLAS_VER_MINOR 1 +#define CUBLAS_VER_PATCH 3 +#define CUBLAS_VER_BUILD 1 +#define CUBLAS_VERSION (CUBLAS_VER_MAJOR * 10000 + CUBLAS_VER_MINOR * 100 + CUBLAS_VER_PATCH) + +/* CUBLAS status type returns */ +typedef enum { + CUBLAS_STATUS_SUCCESS = 0, + CUBLAS_STATUS_NOT_INITIALIZED = 1, + CUBLAS_STATUS_ALLOC_FAILED = 3, + CUBLAS_STATUS_INVALID_VALUE = 7, + CUBLAS_STATUS_ARCH_MISMATCH = 8, + CUBLAS_STATUS_MAPPING_ERROR = 11, + CUBLAS_STATUS_EXECUTION_FAILED = 13, + CUBLAS_STATUS_INTERNAL_ERROR = 14, + CUBLAS_STATUS_NOT_SUPPORTED = 15, + CUBLAS_STATUS_LICENSE_ERROR = 16 +} cublasStatus_t; + +typedef enum { CUBLAS_FILL_MODE_LOWER = 0, CUBLAS_FILL_MODE_UPPER = 1, CUBLAS_FILL_MODE_FULL = 2 } cublasFillMode_t; + +typedef enum { CUBLAS_DIAG_NON_UNIT = 0, CUBLAS_DIAG_UNIT = 1 } cublasDiagType_t; + +typedef enum { CUBLAS_SIDE_LEFT = 0, CUBLAS_SIDE_RIGHT = 1 } cublasSideMode_t; + +typedef enum { + CUBLAS_OP_N = 0, + CUBLAS_OP_T = 1, + CUBLAS_OP_C = 2, + CUBLAS_OP_HERMITAN = 2, /* synonym of CUBLAS_OP_C */ + CUBLAS_OP_CONJG = 3 /* conjugate, placeholder - not supported in the current release */ +} cublasOperation_t; + +typedef enum { CUBLAS_POINTER_MODE_HOST = 0, CUBLAS_POINTER_MODE_DEVICE = 1 } cublasPointerMode_t; + +typedef enum { CUBLAS_ATOMICS_NOT_ALLOWED = 0, CUBLAS_ATOMICS_ALLOWED = 1 } cublasAtomicsMode_t; + +/* For different GEMM algorithms */ +typedef enum { + CUBLAS_GEMM_DFALT = -1, + CUBLAS_GEMM_DEFAULT = -1, + CUBLAS_GEMM_ALGO0 = 0, + CUBLAS_GEMM_ALGO1 = 1, + CUBLAS_GEMM_ALGO2 = 2, + CUBLAS_GEMM_ALGO3 = 3, + CUBLAS_GEMM_ALGO4 = 4, + CUBLAS_GEMM_ALGO5 = 5, +
CUBLAS_GEMM_ALGO6 = 6, + CUBLAS_GEMM_ALGO7 = 7, + CUBLAS_GEMM_ALGO8 = 8, + CUBLAS_GEMM_ALGO9 = 9, + CUBLAS_GEMM_ALGO10 = 10, + CUBLAS_GEMM_ALGO11 = 11, + CUBLAS_GEMM_ALGO12 = 12, + CUBLAS_GEMM_ALGO13 = 13, + CUBLAS_GEMM_ALGO14 = 14, + CUBLAS_GEMM_ALGO15 = 15, + CUBLAS_GEMM_ALGO16 = 16, + CUBLAS_GEMM_ALGO17 = 17, + CUBLAS_GEMM_ALGO18 = 18, // sliced 32x32 + CUBLAS_GEMM_ALGO19 = 19, // sliced 64x32 + CUBLAS_GEMM_ALGO20 = 20, // sliced 128x32 + CUBLAS_GEMM_ALGO21 = 21, // sliced 32x32 -splitK + CUBLAS_GEMM_ALGO22 = 22, // sliced 64x32 -splitK + CUBLAS_GEMM_ALGO23 = 23, // sliced 128x32 -splitK + CUBLAS_GEMM_DEFAULT_TENSOR_OP = 99, + CUBLAS_GEMM_DFALT_TENSOR_OP = 99, + CUBLAS_GEMM_ALGO0_TENSOR_OP = 100, + CUBLAS_GEMM_ALGO1_TENSOR_OP = 101, + CUBLAS_GEMM_ALGO2_TENSOR_OP = 102, + CUBLAS_GEMM_ALGO3_TENSOR_OP = 103, + CUBLAS_GEMM_ALGO4_TENSOR_OP = 104, + CUBLAS_GEMM_ALGO5_TENSOR_OP = 105, + CUBLAS_GEMM_ALGO6_TENSOR_OP = 106, + CUBLAS_GEMM_ALGO7_TENSOR_OP = 107, + CUBLAS_GEMM_ALGO8_TENSOR_OP = 108, + CUBLAS_GEMM_ALGO9_TENSOR_OP = 109, + CUBLAS_GEMM_ALGO10_TENSOR_OP = 110, + CUBLAS_GEMM_ALGO11_TENSOR_OP = 111, + CUBLAS_GEMM_ALGO12_TENSOR_OP = 112, + CUBLAS_GEMM_ALGO13_TENSOR_OP = 113, + CUBLAS_GEMM_ALGO14_TENSOR_OP = 114, + CUBLAS_GEMM_ALGO15_TENSOR_OP = 115 +} cublasGemmAlgo_t; + +/* Enum for default math mode/tensor operation */ +typedef enum { + CUBLAS_DEFAULT_MATH = 0, + + /* deprecated, same effect as using CUBLAS_COMPUTE_32F_FAST_16F, will be removed in a future release */ + CUBLAS_TENSOR_OP_MATH = 1, + + /* same as using matching _PEDANTIC compute type when using cublas<t>routine calls or cublasEx() calls with + cudaDataType as compute type */ + CUBLAS_PEDANTIC_MATH = 2, + + /* allow accelerating single precision routines using TF32 tensor cores */ + CUBLAS_TF32_TENSOR_OP_MATH = 3, + + /* flag to force any reductions to use the accumulator type and not the output type in case of mixed-precision routines + with lower-size output type */ + CUBLAS_MATH_DISALLOW_REDUCED_PRECISION_REDUCTION = 16, +} cublasMath_t; + +/* For backward compatibility purposes */ +typedef cudaDataType cublasDataType_t; + +/* Enum for compute type + * + * - default types provide best available performance using all available hardware features + * and guarantee internal storage precision with at least the same precision and range; + * - _PEDANTIC types ensure standard arithmetic and exact specified internal storage format; + * - _FAST types allow for some loss of precision to enable higher throughput arithmetic.
+ */ +typedef enum { + CUBLAS_COMPUTE_16F = 64, /* half - default */ + CUBLAS_COMPUTE_16F_PEDANTIC = 65, /* half - pedantic */ + CUBLAS_COMPUTE_32F = 68, /* float - default */ + CUBLAS_COMPUTE_32F_PEDANTIC = 69, /* float - pedantic */ + CUBLAS_COMPUTE_32F_FAST_16F = 74, /* float - fast, allows down-converting inputs to half or TF32 */ + CUBLAS_COMPUTE_32F_FAST_16BF = 75, /* float - fast, allows down-converting inputs to bfloat16 or TF32 */ + CUBLAS_COMPUTE_32F_FAST_TF32 = 77, /* float - fast, allows down-converting inputs to TF32 */ + CUBLAS_COMPUTE_64F = 70, /* double - default */ + CUBLAS_COMPUTE_64F_PEDANTIC = 71, /* double - pedantic */ + CUBLAS_COMPUTE_32I = 72, /* signed 32-bit int - default */ + CUBLAS_COMPUTE_32I_PEDANTIC = 73, /* signed 32-bit int - pedantic */ +} cublasComputeType_t; + +/* Opaque structure holding CUBLAS library context */ +struct cublasContext; +typedef struct cublasContext* cublasHandle_t; + +/* Cublas logging */ +typedef void (*cublasLogCallback)(const char* msg); + +/* cuBLAS Exported API {{{ */ + +/* --------------- CUBLAS Helper Functions ---------------- */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCreate_v2(cublasHandle_t* handle); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDestroy_v2(cublasHandle_t handle); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetVersion_v2(cublasHandle_t handle, int* version); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetProperty(libraryPropertyType type, int* value); + +CUBLASAPI size_t CUBLASWINAPI cublasGetCudartVersion(void); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetWorkspace_v2(cublasHandle_t handle, + void* workspace, + size_t workspaceSizeInBytes); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetStream_v2(cublasHandle_t handle, cudaStream_t streamId); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetStream_v2(cublasHandle_t handle, cudaStream_t* streamId); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetPointerMode_v2(cublasHandle_t handle, cublasPointerMode_t* mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetPointerMode_v2(cublasHandle_t handle, cublasPointerMode_t mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetAtomicsMode(cublasHandle_t handle, cublasAtomicsMode_t* mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetAtomicsMode(cublasHandle_t handle, cublasAtomicsMode_t mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetMathMode(cublasHandle_t handle, cublasMath_t* mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetMathMode(cublasHandle_t handle, cublasMath_t mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetSmCountTarget(cublasHandle_t handle, int* smCountTarget); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetSmCountTarget(cublasHandle_t handle, int smCountTarget); + +CUBLASAPI const char* CUBLASWINAPI cublasGetStatusName(cublasStatus_t status); + +CUBLASAPI const char* CUBLASWINAPI cublasGetStatusString(cublasStatus_t status); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasLoggerConfigure(int logIsOn, + int logToStdOut, + int logToStdErr, + const char* logFileName); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetLoggerCallback(cublasLogCallback userCallback); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetLoggerCallback(cublasLogCallback* userCallback); + +cublasStatus_t CUBLASWINAPI cublasSetVector(int n, int elemSize, const void* x, int incx, void* devicePtr, int incy); + +cublasStatus_t CUBLASWINAPI +cublasSetVector_64(int64_t n, int64_t elemSize, const void* x, int64_t incx, void* devicePtr, int64_t incy); + +cublasStatus_t 
CUBLASWINAPI cublasGetVector(int n, int elemSize, const void* x, int incx, void* y, int incy); + +cublasStatus_t CUBLASWINAPI +cublasGetVector_64(int64_t n, int64_t elemSize, const void* x, int64_t incx, void* y, int64_t incy); + +cublasStatus_t CUBLASWINAPI cublasSetMatrix(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb); + +cublasStatus_t CUBLASWINAPI +cublasSetMatrix_64(int64_t rows, int64_t cols, int64_t elemSize, const void* A, int64_t lda, void* B, int64_t ldb); + +cublasStatus_t CUBLASWINAPI cublasGetMatrix(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb); + +cublasStatus_t CUBLASWINAPI +cublasGetMatrix_64(int64_t rows, int64_t cols, int64_t elemSize, const void* A, int64_t lda, void* B, int64_t ldb); + +cublasStatus_t CUBLASWINAPI cublasSetVectorAsync( + int n, int elemSize, const void* hostPtr, int incx, void* devicePtr, int incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasSetVectorAsync_64( + int64_t n, int64_t elemSize, const void* hostPtr, int64_t incx, void* devicePtr, int64_t incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasGetVectorAsync( + int n, int elemSize, const void* devicePtr, int incx, void* hostPtr, int incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasGetVectorAsync_64( + int64_t n, int64_t elemSize, const void* devicePtr, int64_t incx, void* hostPtr, int64_t incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI +cublasSetMatrixAsync(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasSetMatrixAsync_64(int64_t rows, + int64_t cols, + int64_t elemSize, + const void* A, + int64_t lda, + void* B, + int64_t ldb, + cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI +cublasGetMatrixAsync(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasGetMatrixAsync_64(int64_t rows, + int64_t cols, + int64_t elemSize, + const void* A, + int64_t lda, + void* B, + int64_t ldb, + cudaStream_t stream); + +CUBLASAPI void CUBLASWINAPI cublasXerbla(const char* srName, int info); + +/* --------------- CUBLAS BLAS1 Functions ---------------- */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasNrm2Ex(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasNrm2Ex_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSnrm2_v2(cublasHandle_t handle, int n, const float* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSnrm2_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDnrm2_v2(cublasHandle_t handle, int n, const double* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDnrm2_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScnrm2_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScnrm2_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI 
+cublasDznrm2_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDznrm2_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotEx(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + const void* y, + cudaDataType yType, + int incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + const void* y, + cudaDataType yType, + int64_t incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotcEx(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + const void* y, + cudaDataType yType, + int incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotcEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + const void* y, + cudaDataType yType, + int64_t incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSdot_v2(cublasHandle_t handle, int n, const float* x, int incx, const float* y, int incy, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSdot_v2_64( + cublasHandle_t handle, int64_t n, const float* x, int64_t incx, const float* y, int64_t incy, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDdot_v2(cublasHandle_t handle, int n, const double* x, int incx, const double* y, int incy, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDdot_v2_64( + cublasHandle_t handle, int64_t n, const double* x, int64_t incx, const double* y, int64_t incy, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotu_v2( + cublasHandle_t handle, int n, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotu_v2_64(cublasHandle_t handle, + int64_t n, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotc_v2( + cublasHandle_t handle, int n, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotc_v2_64(cublasHandle_t handle, + int64_t n, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotu_v2(cublasHandle_t handle, + int n, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotu_v2_64(cublasHandle_t handle, + int64_t n, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotc_v2(cublasHandle_t handle, + int n, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotc_v2_64(cublasHandle_t handle, + int64_t n, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t 
CUBLASWINAPI cublasScalEx(cublasHandle_t handle, + int n, + const void* alpha, + cudaDataType alphaType, + void* x, + cudaDataType xType, + int incx, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasScalEx_64(cublasHandle_t handle, + int64_t n, + const void* alpha, + cudaDataType alphaType, + void* x, + cudaDataType xType, + int64_t incx, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSscal_v2(cublasHandle_t handle, int n, const float* alpha, float* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSscal_v2_64(cublasHandle_t handle, int64_t n, const float* alpha, float* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDscal_v2(cublasHandle_t handle, int n, const double* alpha, double* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDscal_v2_64(cublasHandle_t handle, int64_t n, const double* alpha, double* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCscal_v2(cublasHandle_t handle, int n, const cuComplex* alpha, cuComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCscal_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* alpha, cuComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCsscal_v2(cublasHandle_t handle, int n, const float* alpha, cuComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCsscal_v2_64(cublasHandle_t handle, int64_t n, const float* alpha, cuComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZscal_v2(cublasHandle_t handle, int n, const cuDoubleComplex* alpha, cuDoubleComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZscal_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* alpha, cuDoubleComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZdscal_v2(cublasHandle_t handle, int n, const double* alpha, cuDoubleComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZdscal_v2_64(cublasHandle_t handle, int64_t n, const double* alpha, cuDoubleComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAxpyEx(cublasHandle_t handle, + int n, + const void* alpha, + cudaDataType alphaType, + const void* x, + cudaDataType xType, + int incx, + void* y, + cudaDataType yType, + int incy, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAxpyEx_64(cublasHandle_t handle, + int64_t n, + const void* alpha, + cudaDataType alphaType, + const void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSaxpy_v2(cublasHandle_t handle, int n, const float* alpha, const float* x, int incx, float* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSaxpy_v2_64( + cublasHandle_t handle, int64_t n, const float* alpha, const float* x, int64_t incx, float* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDaxpy_v2(cublasHandle_t handle, int n, const double* alpha, const double* x, int incx, double* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDaxpy_v2_64( + cublasHandle_t handle, int64_t n, const double* alpha, const double* x, int64_t incx, double* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCaxpy_v2( + cublasHandle_t handle, int n, const cuComplex* alpha, const cuComplex* x, int incx, cuComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCaxpy_v2_64(cublasHandle_t handle, + int64_t n, + 
const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZaxpy_v2(cublasHandle_t handle, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZaxpy_v2_64(cublasHandle_t handle, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCopyEx( + cublasHandle_t handle, int n, const void* x, cudaDataType xType, int incx, void* y, cudaDataType yType, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCopyEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScopy_v2(cublasHandle_t handle, int n, const float* x, int incx, float* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScopy_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, float* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDcopy_v2(cublasHandle_t handle, int n, const double* x, int incx, double* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDcopy_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, double* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCcopy_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, cuComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCcopy_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, cuComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZcopy_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZcopy_v2_64( + cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, cuDoubleComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSswap_v2(cublasHandle_t handle, int n, float* x, int incx, float* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSswap_v2_64(cublasHandle_t handle, int64_t n, float* x, int64_t incx, float* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDswap_v2(cublasHandle_t handle, int n, double* x, int incx, double* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDswap_v2_64(cublasHandle_t handle, int64_t n, double* x, int64_t incx, double* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCswap_v2(cublasHandle_t handle, int n, cuComplex* x, int incx, cuComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCswap_v2_64(cublasHandle_t handle, int64_t n, cuComplex* x, int64_t incx, cuComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZswap_v2(cublasHandle_t handle, int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZswap_v2_64(cublasHandle_t handle, int64_t n, cuDoubleComplex* x, int64_t incx, cuDoubleComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSwapEx( + cublasHandle_t handle, int n, void* x, cudaDataType xType, int incx, void* y, cudaDataType yType, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSwapEx_64(cublasHandle_t handle, + int64_t n, + void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + 
int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamax_v2(cublasHandle_t handle, int n, const float* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamax_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamax_v2(cublasHandle_t handle, int n, const double* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamax_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamax_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamax_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamax_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamax_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIamaxEx(cublasHandle_t handle, int n, const void* x, cudaDataType xType, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIamaxEx_64(cublasHandle_t handle, int64_t n, const void* x, cudaDataType xType, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamin_v2(cublasHandle_t handle, int n, const float* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamin_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamin_v2(cublasHandle_t handle, int n, const double* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamin_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamin_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamin_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamin_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamin_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIaminEx(cublasHandle_t handle, int n, const void* x, cudaDataType xType, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIaminEx_64(cublasHandle_t handle, int64_t n, const void* x, cudaDataType xType, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAsumEx(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + void* result, + cudaDataType resultType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAsumEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + void* result, + cudaDataType resultType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSasum_v2(cublasHandle_t handle, int n, const float* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSasum_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, float* result); 
+ +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDasum_v2(cublasHandle_t handle, int n, const double* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDasum_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScasum_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScasum_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDzasum_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDzasum_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrot_v2(cublasHandle_t handle, int n, float* x, int incx, float* y, int incy, const float* c, const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSrot_v2_64( + cublasHandle_t handle, int64_t n, float* x, int64_t incx, float* y, int64_t incy, const float* c, const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDrot_v2(cublasHandle_t handle, int n, double* x, int incx, double* y, int incy, const double* c, const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDrot_v2_64(cublasHandle_t handle, + int64_t n, + double* x, + int64_t incx, + double* y, + int64_t incy, + const double* c, + const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCrot_v2( + cublasHandle_t handle, int n, cuComplex* x, int incx, cuComplex* y, int incy, const float* c, const cuComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCrot_v2_64(cublasHandle_t handle, + int64_t n, + cuComplex* x, + int64_t incx, + cuComplex* y, + int64_t incy, + const float* c, + const cuComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsrot_v2( + cublasHandle_t handle, int n, cuComplex* x, int incx, cuComplex* y, int incy, const float* c, const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsrot_v2_64(cublasHandle_t handle, + int64_t n, + cuComplex* x, + int64_t incx, + cuComplex* y, + int64_t incy, + const float* c, + const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZrot_v2(cublasHandle_t handle, + int n, + cuDoubleComplex* x, + int incx, + cuDoubleComplex* y, + int incy, + const double* c, + const cuDoubleComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZrot_v2_64(cublasHandle_t handle, + int64_t n, + cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* y, + int64_t incy, + const double* c, + const cuDoubleComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdrot_v2(cublasHandle_t handle, + int n, + cuDoubleComplex* x, + int incx, + cuDoubleComplex* y, + int incy, + const double* c, + const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdrot_v2_64(cublasHandle_t handle, + int64_t n, + cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* y, + int64_t incy, + const double* c, + const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotEx(cublasHandle_t handle, + int n, + void* x, + cudaDataType xType, + int incx, + void* y, + cudaDataType yType, + int incy, + const void* c, + const void* s, + cudaDataType csType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotEx_64(cublasHandle_t handle, + int64_t n, + void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy, + 
const void* c, + const void* s, + cudaDataType csType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSrotg_v2(cublasHandle_t handle, float* a, float* b, float* c, float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDrotg_v2(cublasHandle_t handle, double* a, double* b, double* c, double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCrotg_v2(cublasHandle_t handle, cuComplex* a, cuComplex* b, float* c, cuComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZrotg_v2(cublasHandle_t handle, cuDoubleComplex* a, cuDoubleComplex* b, double* c, cuDoubleComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotgEx(cublasHandle_t handle, + void* a, + void* b, + cudaDataType abType, + void* c, + void* s, + cudaDataType csType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrotm_v2(cublasHandle_t handle, int n, float* x, int incx, float* y, int incy, const float* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrotm_v2_64(cublasHandle_t handle, int64_t n, float* x, int64_t incx, float* y, int64_t incy, const float* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDrotm_v2(cublasHandle_t handle, int n, double* x, int incx, double* y, int incy, const double* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDrotm_v2_64( + cublasHandle_t handle, int64_t n, double* x, int64_t incx, double* y, int64_t incy, const double* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotmEx(cublasHandle_t handle, + int n, + void* x, + cudaDataType xType, + int incx, + void* y, + cudaDataType yType, + int incy, + const void* param, + cudaDataType paramType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotmEx_64(cublasHandle_t handle, + int64_t n, + void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy, + const void* param, + cudaDataType paramType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrotmg_v2(cublasHandle_t handle, float* d1, float* d2, float* x1, const float* y1, float* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDrotmg_v2(cublasHandle_t handle, double* d1, double* d2, double* x1, const double* y1, double* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotmgEx(cublasHandle_t handle, + void* d1, + cudaDataType d1Type, + void* d2, + cudaDataType d2Type, + void* x1, + cudaDataType x1Type, + const void* y1, + cudaDataType y1Type, + void* param, + cudaDataType paramType, + cudaDataType executiontype); + +/* --------------- CUBLAS BLAS2 Functions ---------------- */ + +/* GEMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, 
+ const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* GBMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + 
int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* TRMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* A, + int64_t lda, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* TBMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuComplex* A, + int64_t lda, + 
cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* TPMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* AP, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* AP, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* AP, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* AP, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* AP, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* AP, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int64_t incx); + +/* TRSV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI 
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* TPSV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* AP, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* AP, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* AP, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* AP, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* AP, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* AP, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int64_t incx); + +/* TBSV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbsv_v2(cublasHandle_t handle, + cublasFillMode_t
uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuComplex* A, + int64_t lda, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* SYMV/HEMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, 
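+ const cuComplex* beta, + cuComplex* y, + int64_t incy); +

/*
 * Editor's note: usage sketch for the SYMV/HEMV routines above; not part of
 * the original header. These compute y = alpha*A*x + beta*y where only the
 * `uplo` triangle of the symmetric (SYMV) or Hermitian (HEMV) matrix A is
 * referenced. Assumes the default CUBLAS_POINTER_MODE_HOST, so alpha/beta
 * are host pointers; d_A/d_x/d_y are hypothetical device buffers.
 */
static void symv_upper(cublasHandle_t handle, int n,
                       const float* d_A, const float* d_x, float* d_y) {
    const float alpha = 1.0f, beta = 0.0f;  /* host scalars */
    cublasSsymv_v2(handle, CUBLAS_FILL_MODE_UPPER, n,
                   &alpha, d_A, n, d_x, 1, &beta, d_y, 1);
}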
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* SBMV/HBMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* SPMV/HPMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* AP, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* AP, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const
double* AP, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* AP, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* AP, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* AP, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* GER */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSger_v2(cublasHandle_t handle, + int m, + int n, + const float* alpha, + const float* x, + int incx, + const float* y, + int incy, + float* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSger_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + const float* y, + int64_t incy, + float* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDger_v2(cublasHandle_t handle, + int m, + int n, + const double* alpha, + const double* x, + int incx, + const double* y, + int incy, + double* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDger_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + const double* y, + int64_t incy, + double* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeru_v2(cublasHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeru_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgerc_v2(cublasHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgerc_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeru_v2(cublasHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeru_v2_64(cublasHandle_t handle, + 
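int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); +

/*
 * Editor's note: usage sketch for the GER rank-1 updates above; not part of
 * the original header. GER computes A = alpha*x*y^T + A (GERU/GERC are the
 * unconjugated/conjugated complex variants). d_x (length m), d_y (length n)
 * and the m-by-n column-major d_A are hypothetical device buffers.
 */
static void ger_rank1(cublasHandle_t handle, int m, int n,
                      const float* d_x, const float* d_y, float* d_A) {
    const float alpha = 1.0f;
    cublasSger_v2(handle, m, n, &alpha, d_x, 1, d_y, 1, d_A, /* lda */ m);
}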
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgerc_v2(cublasHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgerc_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +/* SYR/HER */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* x, + int incx, + float* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + float* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* x, + int incx, + double* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + double* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const cuComplex* x, + int incx, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* A, + int64_t lda); + +/* SPR/HPR */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr_v2( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const float* alpha, const float* x, int incx, float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, +
int64_t incx, + float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr_v2( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const double* alpha, const double* x, int incx, double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const cuComplex* x, + int incx, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* AP); + +/* SYR2/HER2 */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* x, + int incx, + const float* y, + int incy, + float* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + const float* y, + int64_t incy, + float* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* x, + int incx, + const double* y, + int incy, + double* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + const double* y, + int64_t incy, + double* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, 
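+ int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); +

/*
 * Editor's note: usage sketch for the packed rank-1 update SPR above; not
 * part of the original header. SPR computes AP = alpha*x*x^T + AP, where AP
 * stores one triangle of a symmetric matrix packed column by column in
 * n*(n+1)/2 elements. d_x and d_AP are hypothetical device buffers.
 */
static void spr_packed_upper(cublasHandle_t handle, int n,
                             const float* d_x, float* d_AP) {
    const float alpha = 1.0f;
    /* d_AP must hold n*(n+1)/2 floats (upper triangle, packed). */
    cublasSspr_v2(handle, CUBLAS_FILL_MODE_UPPER, n, &alpha, d_x, 1, d_AP);
}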
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +/* SPR2/HPR2 */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* x, + int incx, + const float* y, + int incy, + float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + const float* y, + int64_t incy, + float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* x, + int incx, + const double* y, + int incy, + double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + const double* y, + int64_t incy, + double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* AP); + +/* BATCH GEMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const float* const Aarray[], + int lda, + const float* const xarray[], + int incx, + const float* beta, + float* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const float* const Aarray[], + int64_t lda, + const float* const xarray[], + int64_t incx, + const float* beta, + float* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const double* alpha, + const double* const Aarray[], + int lda, + const double* const xarray[], + int incx, + const double* beta, + double* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI
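cublasDgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const double* alpha, + const double* const Aarray[], + int64_t lda, + const double* const xarray[], + int64_t incx, + const double* beta, + double* const yarray[], + int64_t incy, + int64_t batchCount); +

/*
 * Editor's note: usage sketch for the batched GEMV entry points above; not
 * part of the original header. Each of the batchCount problems computes
 * y[i] = alpha*op(A[i])*x[i] + beta*y[i]. Aarray/xarray/yarray are DEVICE
 * arrays of DEVICE pointers, one per problem; building those pointer arrays
 * (e.g. cudaMemcpy of a host array of device pointers) is assumed done.
 */
static void gemv_batched(cublasHandle_t handle, int m, int n,
                         const float* const* d_Aptrs,
                         const float* const* d_xptrs,
                         float* const* d_yptrs, int batchCount) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemvBatched(handle, CUBLAS_OP_N, m, n, &alpha,
                       d_Aptrs, /* lda */ m, d_xptrs, 1,
                       &beta, d_yptrs, 1, batchCount);
}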
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int lda, + const cuComplex* const xarray[], + int incx, + const cuComplex* beta, + cuComplex* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int64_t lda, + const cuComplex* const xarray[], + int64_t incx, + const cuComplex* beta, + cuComplex* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int lda, + const cuDoubleComplex* const xarray[], + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int64_t lda, + const cuDoubleComplex* const xarray[], + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* const yarray[], + int64_t incy, + int64_t batchCount); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* const Aarray[], + int lda, + const __half* const xarray[], + int incx, + const float* beta, + __half* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* const Aarray[], + int64_t lda, + const __half* const xarray[], + int64_t incx, + const float* beta, + __half* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* const Aarray[], + int lda, + const __half* const xarray[], + int incx, + const float* beta, + float* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* const Aarray[], + int64_t lda, + const __half* const xarray[], + int64_t incx, + const float* beta, + float* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int lda, + const __nv_bfloat16* const xarray[], + int incx, + const float* beta, + __nv_bfloat16* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, +
int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int64_t lda, + const __nv_bfloat16* const xarray[], + int64_t incx, + const float* beta, + __nv_bfloat16* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int lda, + const __nv_bfloat16* const xarray[], + int incx, + const float* beta, + float* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int64_t lda, + const __nv_bfloat16* const xarray[], + int64_t incx, + const float* beta, + float* const yarray[], + int64_t incy, + int64_t batchCount); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const float* A, + int lda, + long long int strideA, + const float* x, + int incx, + long long int stridex, + const float* beta, + float* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + long long int strideA, + const float* x, + int64_t incx, + long long int stridex, + const float* beta, + float* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const double* alpha, + const double* A, + int lda, + long long int strideA, + const double* x, + int incx, + long long int stridex, + const double* beta, + double* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + long long int strideA, + const double* x, + int64_t incx, + long long int stridex, + const double* beta, + double* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + long long int strideA, + const cuComplex* x, + int incx, + long long int stridex, + const cuComplex* beta, + cuComplex* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + long long int strideA, + const cuComplex* x, + int64_t incx, + long long int stridex, + const cuComplex* beta, + cuComplex* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + long long int strideA, + const cuDoubleComplex* x, + int incx, + long long int stridex, + const cuDoubleComplex* beta, + cuDoubleComplex* y, 
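+ int incy, + long long int stridey, + int batchCount); +

/*
 * Editor's note: usage sketch for the strided-batched GEMV variants above;
 * not part of the original header. Instead of pointer arrays, problem i uses
 * A + i*strideA, x + i*stridex, y + i*stridey within single contiguous
 * allocations, which avoids building device pointer arrays. Hypothetical
 * buffers: d_A (m*n*batchCount), d_x (n*batchCount), d_y (m*batchCount).
 */
static void gemv_strided_batched(cublasHandle_t handle, int m, int n,
                                 const float* d_A, const float* d_x,
                                 float* d_y, int batchCount) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemvStridedBatched(handle, CUBLAS_OP_N, m, n, &alpha,
                              d_A, m, (long long)m * n, /* strideA */
                              d_x, 1, n,                /* stridex */
                              &beta, d_y, 1, m,         /* stridey */
                              batchCount);
}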
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + long long int strideA, + const cuDoubleComplex* x, + int64_t incx, + long long int stridex, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* A, + int lda, + long long int strideA, + const __half* x, + int incx, + long long int stridex, + const float* beta, + __half* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* A, + int64_t lda, + long long int strideA, + const __half* x, + int64_t incx, + long long int stridex, + const float* beta, + __half* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* A, + int lda, + long long int strideA, + const __half* x, + int incx, + long long int stridex, + const float* beta, + float* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* A, + int64_t lda, + long long int strideA, + const __half* x, + int64_t incx, + long long int stridex, + const float* beta, + float* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* A, + int lda, + long long int strideA, + const __nv_bfloat16* x, + int incx, + long long int stridex, + const float* beta, + __nv_bfloat16* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* A, + int64_t lda, + long long int strideA, + const __nv_bfloat16* x, + int64_t incx, + long long int stridex, + const float* beta, + __nv_bfloat16* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* A, + int lda, + long long int strideA, + const __nv_bfloat16* x, + int incx, + long long int stridex, + const float* beta, + float* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* A, + int64_t lda, + long long int strideA, + const __nv_bfloat16* x, + int64_t incx, + long long int stridex, + const float* beta, + float* y, + int64_t incy, + long long int stridey, +
int64_t batchCount); + +#endif + +/* ---------------- CUBLAS BLAS3 Functions ---------------- */ + +/* GEMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3m(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3m_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const 
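cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); +

/*
 * Editor's note: usage sketch for the GEMM family above; not part of the
 * original header. GEMM computes C = alpha*op(A)*op(B) + beta*C on
 * column-major data. With CUBLAS_OP_N on both sides, A is m-by-k (lda >= m),
 * B is k-by-n (ldb >= k) and C is m-by-n (ldc >= m). d_A/d_B/d_C are
 * hypothetical device buffers; error checks omitted.
 */
static void sgemm_nn(cublasHandle_t handle, int m, int n, int k,
                     const float* d_A, const float* d_B, float* d_C) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemm_v2(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                   &alpha, d_A, m, d_B, k, &beta, d_C, m);
}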
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm3m(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm3m_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemm(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const __half* alpha, + const __half* A, + int lda, + const __half* B, + int ldb, + const __half* beta, + __half* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemm_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const __half* alpha, + const __half* A, + int64_t lda, + const __half* B, + int64_t ldb, + const __half* beta, + __half* C, + int64_t ldc); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const float* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const float* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const void* beta, + void* C, + cudaDataType Ctype, + int ldc, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const void* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const void* beta, + void* C, + cudaDataType Ctype, + int64_t ldc, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int
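n, + int k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); +

/*
 * Editor's note: usage sketch for cublasGemmEx above; not part of the
 * original header. GemmEx decouples the storage types (cudaDataType) from
 * the accumulation type (cublasComputeType_t): here FP16 inputs accumulate
 * in FP32, and alpha/beta are float because the compute type is 32F.
 * d_A16/d_B16 are hypothetical __half device buffers; d_C32 is float.
 */
static void gemm_ex_fp16_in_fp32_out(cublasHandle_t handle, int m, int n, int k,
                                     const void* d_A16, const void* d_B16,
                                     void* d_C32) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                 &alpha, d_A16, CUDA_R_16F, m,
                 d_B16, CUDA_R_16F, k,
                 &beta, d_C32, CUDA_R_32F, m,
                 CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT);
}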
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +/* SYRK */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk3mEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const
void* A, + cudaDataType Atype, + int lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk3mEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +/* HERK */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const cuComplex* A, + int lda, + const float* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const cuComplex* A, + int64_t lda, + const float* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const cuDoubleComplex* A, + int lda, + const double* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const cuDoubleComplex* A, + int64_t lda, + const double* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const void* A, + cudaDataType Atype, + int lda, + const float* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const float* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk3mEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const void* A, + cudaDataType Atype, + int lda, + const float* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk3mEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const float* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +/* SYR2K / HER2K */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* 
beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const float* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const float* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const double* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const double* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* SYRKX / HERKX */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrkx(cublasHandle_t handle, + 
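cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* beta, + double* C, + int ldc); +

/*
 * Editor's note: usage sketch for the SYR2K/HER2K updates above; not part of
 * the original header. SYR2K computes C = alpha*(A*B^T + B*A^T) + beta*C
 * (with CUBLAS_OP_N), touching only the `uplo` triangle of the n-by-n C.
 * A and B are n-by-k here; d_A/d_B/d_C are hypothetical device buffers.
 */
static void syr2k_upper(cublasHandle_t handle, int n, int k,
                        const float* d_A, const float* d_B, float* d_C) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSsyr2k_v2(handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, n, k,
                    &alpha, d_A, n, d_B, n, &beta, d_C, n);
}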
+CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const float* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const float* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const double* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const double* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* SYMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, +
const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* HEMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* TRSM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const float* alpha, + const float* A, + int lda, + float* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsm_v2_64(cublasHandle_t 
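handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + float* B, + int64_t ldb); +

/*
 * Editor's note: usage sketch for the TRSM family above; not part of the
 * original header. TRSM solves op(A)*X = alpha*B (CUBLAS_SIDE_LEFT) or
 * X*op(A) = alpha*B (CUBLAS_SIDE_RIGHT) for the m-by-n X, overwriting B.
 * With side-left, A is m-by-m triangular. d_A/d_B are hypothetical device
 * buffers; error checks omitted.
 */
static void trsm_left_lower(cublasHandle_t handle, int m, int n,
                            const float* d_A, float* d_B) {
    const float alpha = 1.0f;
    cublasStrsm_v2(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,
                   CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, m, n,
                   &alpha, d_A, m, d_B, m);
}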
handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + float* B, + int64_t ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const double* alpha, + const double* A, + int lda, + double* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + double* B, + int64_t ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + cuComplex* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + cuComplex* B, + int64_t ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* B, + int64_t ldb); + +/* TRMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + 
const cuComplex* B, + int ldb, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + cuDoubleComplex* C, + int64_t ldc); + +/* BATCH GEMM */ + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const __half* alpha, + const __half* const Aarray[], + int lda, + const __half* const Barray[], + int ldb, + const __half* beta, + __half* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const __half* alpha, + const __half* const Aarray[], + int64_t lda, + const __half* const Barray[], + int64_t ldb, + const __half* beta, + __half* const Carray[], + int64_t ldc, + int64_t batchCount); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* const Aarray[], + int lda, + const float* const Barray[], + int ldb, + const float* beta, + float* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const float* const Aarray[], + int64_t lda, + const float* const Barray[], + int64_t ldb, + const float* beta, + float* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const double* alpha, + const double* const Aarray[], + int lda, + const double* const Barray[], + int ldb, + const double* beta, + double* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const double* alpha, + const double* const Aarray[], + int64_t lda, + const double* const Barray[], + int64_t ldb, + const double* beta, + double* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* 
alpha, + const cuComplex* const Aarray[], + int lda, + const cuComplex* const Barray[], + int ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int64_t lda, + const cuComplex* const Barray[], + int64_t ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int lda, + const cuComplex* const Barray[], + int ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int64_t lda, + const cuComplex* const Barray[], + int64_t ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int lda, + const cuDoubleComplex* const Barray[], + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int64_t lda, + const cuDoubleComplex* const Barray[], + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* const Carray[], + int64_t ldc, + int64_t batchCount); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const __half* alpha, + const __half* A, + int lda, + long long int strideA, + const __half* B, + int ldb, + long long int strideB, + const __half* beta, + __half* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const __half* alpha, + const __half* A, + int64_t lda, + long long int strideA, + const __half* B, + int64_t ldb, + long long int strideB, + const __half* beta, + __half* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* A, + int lda, + long long int strideA, + const float* B, + int ldb, + long long int strideB, + const float* beta, + float* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + 
cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + long long int strideA, + const float* B, + int64_t ldb, + long long int strideB, + const float* beta, + float* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const double* alpha, + const double* A, + int lda, + long long int strideA, + const double* B, + int ldb, + long long int strideB, + const double* beta, + double* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + long long int strideA, + const double* B, + int64_t ldb, + long long int strideB, + const double* beta, + double* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + long long int strideA, + const cuComplex* B, + int ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + long long int strideA, + const cuComplex* B, + int64_t ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + long long int strideA, + const cuComplex* B, + int ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + long long int strideA, + const cuComplex* B, + int64_t ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + long long int strideA, + const cuDoubleComplex* B, + int ldb, + long long int strideB, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + long long int 
strideA, + const cuDoubleComplex* B, + int64_t ldb, + long long int strideB, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, + const void* const Aarray[], + cudaDataType Atype, + int lda, + const void* const Barray[], + cudaDataType Btype, + int ldb, + const void* beta, + void* const Carray[], + cudaDataType Ctype, + int ldc, + int batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmBatchedEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const void* alpha, + const void* const Aarray[], + cudaDataType Atype, + int64_t lda, + const void* const Barray[], + cudaDataType Btype, + int64_t ldb, + const void* beta, + void* const Carray[], + cudaDataType Ctype, + int64_t ldc, + int64_t batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmStridedBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, + const void* A, + cudaDataType Atype, + int lda, + long long int strideA, + const void* B, + cudaDataType Btype, + int ldb, + long long int strideB, + const void* beta, + void* C, + cudaDataType Ctype, + int ldc, + long long int strideC, + int batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmStridedBatchedEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const void* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + long long int strideA, + const void* B, + cudaDataType Btype, + int64_t ldb, + long long int strideB, + const void* beta, + void* C, + cudaDataType Ctype, + int64_t ldc, + long long int strideC, + int64_t batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +/* ---------------- CUBLAS BLAS-like Extension ---------------- */ + +/* GEAM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* beta, + const float* B, + int ldb, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* beta, + const float* B, + int64_t ldb, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* beta, + const double* B, + int ldb, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* beta, + const double* B, + int64_t ldb, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeam(cublasHandle_t handle, + 
cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* beta, + const cuComplex* B, + int ldb, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* beta, + const cuComplex* B, + int64_t ldb, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* beta, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* beta, + const cuDoubleComplex* B, + int64_t ldb, + cuDoubleComplex* C, + int64_t ldc); + +/* TRSM - Batched Triangular Solver */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const float* alpha, + const float* const A[], + int lda, + float* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* const A[], + int64_t lda, + float* const B[], + int64_t ldb, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const double* alpha, + const double* const A[], + int lda, + double* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const double* alpha, + const double* const A[], + int64_t lda, + double* const B[], + int64_t ldb, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuComplex* alpha, + const cuComplex* const A[], + int lda, + cuComplex* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* const A[], + int64_t lda, + cuComplex* const B[], + int64_t ldb, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const A[], + int lda, + cuDoubleComplex* const B[], + int ldb, + int 
batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const A[], + int64_t lda, + cuDoubleComplex* const B[], + int64_t ldb, + int64_t batchCount); + +/* DGMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const float* A, + int lda, + const float* x, + int incx, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const double* A, + int lda, + const double* x, + int incx, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* C, + int64_t ldc); + +/* Batched - MATINV*/ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSmatinvBatched(cublasHandle_t handle, + int n, + const float* const A[], + int lda, + float* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDmatinvBatched(cublasHandle_t handle, + int n, + const double* const A[], + int lda, + double* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCmatinvBatched(cublasHandle_t handle, + int n, + const cuComplex* const A[], + int lda, + cuComplex* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZmatinvBatched(cublasHandle_t handle, + int n, + const cuDoubleComplex* const A[], + int lda, + cuDoubleComplex* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +/* Batch QR Factorization */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgeqrfBatched(cublasHandle_t handle, + int m, + int n, + float* const Aarray[], + int lda, + float* const TauArray[], + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgeqrfBatched(cublasHandle_t handle, + int m, + int n, + double* const Aarray[], + int lda, + double* const TauArray[], + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeqrfBatched(cublasHandle_t handle, 
+ int m, + int n, + cuComplex* const Aarray[], + int lda, + cuComplex* const TauArray[], + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeqrfBatched(cublasHandle_t handle, + int m, + int n, + cuDoubleComplex* const Aarray[], + int lda, + cuDoubleComplex* const TauArray[], + int* info, + int batchSize); + +/* Least Square Min only m >= n and Non-transpose supported */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + float* const Aarray[], + int lda, + float* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + double* const Aarray[], + int lda, + double* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + cuComplex* const Aarray[], + int lda, + cuComplex* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + cuDoubleComplex* const Aarray[], + int lda, + cuDoubleComplex* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +/* TPTTR : Triangular Pack format to Triangular format */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasStpttr(cublasHandle_t handle, cublasFillMode_t uplo, int n, const float* AP, float* A, int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDtpttr(cublasHandle_t handle, cublasFillMode_t uplo, int n, const double* AP, double* A, int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCtpttr(cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuComplex* AP, cuComplex* A, int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpttr( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuDoubleComplex* AP, cuDoubleComplex* A, int lda); + +/* TRTTP : Triangular format to Triangular Pack format */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasStrttp(cublasHandle_t handle, cublasFillMode_t uplo, int n, const float* A, int lda, float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDtrttp(cublasHandle_t handle, cublasFillMode_t uplo, int n, const double* A, int lda, double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCtrttp(cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuComplex* A, int lda, cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrttp( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* AP); + +/* Batched LU - GETRF*/ + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSgetrfBatched(cublasHandle_t handle, int n, float* const A[], int lda, int* P, int* info, int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDgetrfBatched(cublasHandle_t handle, int n, double* const A[], int lda, int* P, int* info, int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCgetrfBatched(cublasHandle_t handle, int n, cuComplex* const A[], int lda, int* P, int* info, int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgetrfBatched( + cublasHandle_t handle, int n, cuDoubleComplex* const A[], int lda, int* P, int* info, int batchSize); + +/* Batched inversion based on LU factorization from getrf 
*/ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgetriBatched(cublasHandle_t handle, + int n, + const float* const A[], + int lda, + const int* P, + float* const C[], + int ldc, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgetriBatched(cublasHandle_t handle, + int n, + const double* const A[], + int lda, + const int* P, + double* const C[], + int ldc, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgetriBatched(cublasHandle_t handle, + int n, + const cuComplex* const A[], + int lda, + const int* P, + cuComplex* const C[], + int ldc, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgetriBatched(cublasHandle_t handle, + int n, + const cuDoubleComplex* const A[], + int lda, + const int* P, + cuDoubleComplex* const C[], + int ldc, + int* info, + int batchSize); + +/* Batched solver based on LU factorization from getrf */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const float* const Aarray[], + int lda, + const int* devIpiv, + float* const Barray[], + int ldb, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const double* const Aarray[], + int lda, + const int* devIpiv, + double* const Barray[], + int ldb, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const cuComplex* const Aarray[], + int lda, + const int* devIpiv, + cuComplex* const Barray[], + int ldb, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const cuDoubleComplex* const Aarray[], + int lda, + const int* devIpiv, + cuDoubleComplex* const Barray[], + int ldb, + int* info, + int batchSize); + +/* Deprecated */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasUint8gemmBias(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + cublasOperation_t transc, + int m, + int n, + int k, + const unsigned char* A, + int A_bias, + int lda, + const unsigned char* B, + int B_bias, + int ldb, + unsigned char* C, + int C_bias, + int ldc, + int C_mult, + int C_shift); + +/* }}} cuBLAS Exported API */ + +#if defined(__cplusplus) +} + +static inline cublasStatus_t cublasMigrateComputeType(cublasHandle_t handle, + cudaDataType_t dataType, + cublasComputeType_t* computeType) { + cublasMath_t mathMode = CUBLAS_DEFAULT_MATH; + cublasStatus_t status = CUBLAS_STATUS_SUCCESS; + + status = cublasGetMathMode(handle, &mathMode); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + bool isPedantic = ((mathMode & 0xf) == CUBLAS_PEDANTIC_MATH); + + switch (dataType) { + case CUDA_R_32F: + case CUDA_C_32F: + *computeType = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; + return CUBLAS_STATUS_SUCCESS; + case CUDA_R_64F: + case CUDA_C_64F: + *computeType = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; + return CUBLAS_STATUS_SUCCESS; + case CUDA_R_16F: + *computeType = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; + return CUBLAS_STATUS_SUCCESS; + case CUDA_R_32I: + *computeType = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; + return CUBLAS_STATUS_SUCCESS; + default: + return CUBLAS_STATUS_NOT_SUPPORTED; + } +} +/* wrappers to accept old code with cudaDataType computeType when referenced from c++ code */ +static inline cublasStatus_t cublasGemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, /* host or device pointer */ + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const void* beta, /* host or device pointer */ + void* C, + cudaDataType Ctype, + int ldc, + cudaDataType computeType, + cublasGemmAlgo_t algo) { + cublasComputeType_t migratedComputeType = CUBLAS_COMPUTE_32F; + cublasStatus_t status = CUBLAS_STATUS_SUCCESS; + status = cublasMigrateComputeType(handle, computeType, &migratedComputeType); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + return cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + alpha, + A, + Atype, + lda, + B, + Btype, + ldb, + beta, + C, + Ctype, + ldc, + migratedComputeType, + algo); +} + +static inline cublasStatus_t cublasGemmBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, /* host or device pointer */ + const void* const Aarray[], + cudaDataType Atype, + int lda, + const void* const Barray[], + cudaDataType Btype, + int ldb, + const void* beta, /* host or device pointer */ + void* const Carray[], + cudaDataType Ctype, + int ldc, + int batchCount, + cudaDataType computeType, + cublasGemmAlgo_t algo) { + cublasComputeType_t migratedComputeType; + cublasStatus_t status; + status = cublasMigrateComputeType(handle, computeType, &migratedComputeType); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + return cublasGemmBatchedEx(handle, + transa, + transb, + m, + n, + k, + alpha, + Aarray, + Atype, + lda, + Barray, + Btype, + ldb, + beta, + Carray, + Ctype, + ldc, + batchCount, + migratedComputeType, + algo); +} + +static inline cublasStatus_t cublasGemmStridedBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, /* host or device pointer */ + const void* A, + cudaDataType Atype, + int lda, + long long int strideA, /* purposely signed */ + const void* B, + cudaDataType Btype, + int ldb, + long long int strideB, + const void* beta, /* host or device pointer */ + void* C, + cudaDataType Ctype, + int ldc, + long long int strideC, + int batchCount, + cudaDataType computeType, + cublasGemmAlgo_t algo) { + cublasComputeType_t migratedComputeType; + cublasStatus_t status; + status = cublasMigrateComputeType(handle, computeType, &migratedComputeType); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + return cublasGemmStridedBatchedEx(handle, + transa, + transb, + m, + n, + k, + alpha, + A, + Atype, + lda, + strideA, + B, + Btype, + ldb, + strideB, + beta, + C, + Ctype, + ldc, + strideC, + batchCount, + migratedComputeType, + algo); +} +#endif /* __cplusplus */ + +#endif /* !defined(CUBLAS_API_H_) */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..bd81a3b1d8e7e3d04d6c54f4c0640af7d8893eab --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h @@ -0,0 +1,478 @@ +/* + * Copyright 1993-2019 
NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * This is the public header file for the new CUBLAS library API, it mapped the generic + * Cublas name functions to the actual _v2 implementations. 
+ */ + +#if !defined(CUBLAS_V2_H_) +#define CUBLAS_V2_H_ + +#if defined(CUBLAS_H_) +#error "It is an error to include both cublas.h and cublas_v2.h" +#endif + +#undef CUBLASAPI +#ifdef __CUDACC__ +#define CUBLASAPI __host__ __device__ +#else +#define CUBLASAPI +#endif + +#include "cublas_api.h" + +#define cublasCreate cublasCreate_v2 +#define cublasDestroy cublasDestroy_v2 +#define cublasGetVersion cublasGetVersion_v2 +#define cublasSetWorkspace cublasSetWorkspace_v2 +#define cublasSetStream cublasSetStream_v2 +#define cublasGetStream cublasGetStream_v2 +#define cublasGetPointerMode cublasGetPointerMode_v2 +#define cublasSetPointerMode cublasSetPointerMode_v2 + +/* 32-bit integer */ + +/* Blas1 Routines */ + +#define cublasSnrm2 cublasSnrm2_v2 +#define cublasDnrm2 cublasDnrm2_v2 +#define cublasScnrm2 cublasScnrm2_v2 +#define cublasDznrm2 cublasDznrm2_v2 + +#define cublasSdot cublasSdot_v2 +#define cublasDdot cublasDdot_v2 +#define cublasCdotu cublasCdotu_v2 +#define cublasCdotc cublasCdotc_v2 +#define cublasZdotu cublasZdotu_v2 +#define cublasZdotc cublasZdotc_v2 + +#define cublasSscal cublasSscal_v2 +#define cublasDscal cublasDscal_v2 +#define cublasCscal cublasCscal_v2 +#define cublasCsscal cublasCsscal_v2 +#define cublasZscal cublasZscal_v2 +#define cublasZdscal cublasZdscal_v2 + +#define cublasSaxpy cublasSaxpy_v2 +#define cublasDaxpy cublasDaxpy_v2 +#define cublasCaxpy cublasCaxpy_v2 +#define cublasZaxpy cublasZaxpy_v2 + +#define cublasScopy cublasScopy_v2 +#define cublasDcopy cublasDcopy_v2 +#define cublasCcopy cublasCcopy_v2 +#define cublasZcopy cublasZcopy_v2 + +#define cublasSswap cublasSswap_v2 +#define cublasDswap cublasDswap_v2 +#define cublasCswap cublasCswap_v2 +#define cublasZswap cublasZswap_v2 + +#define cublasIsamax cublasIsamax_v2 +#define cublasIdamax cublasIdamax_v2 +#define cublasIcamax cublasIcamax_v2 +#define cublasIzamax cublasIzamax_v2 + +#define cublasIsamin cublasIsamin_v2 +#define cublasIdamin cublasIdamin_v2 +#define cublasIcamin cublasIcamin_v2 +#define cublasIzamin cublasIzamin_v2 + +#define cublasSasum cublasSasum_v2 +#define cublasDasum cublasDasum_v2 +#define cublasScasum cublasScasum_v2 +#define cublasDzasum cublasDzasum_v2 + +#define cublasSrot cublasSrot_v2 +#define cublasDrot cublasDrot_v2 +#define cublasCrot cublasCrot_v2 +#define cublasCsrot cublasCsrot_v2 +#define cublasZrot cublasZrot_v2 +#define cublasZdrot cublasZdrot_v2 + +#define cublasSrotg cublasSrotg_v2 +#define cublasDrotg cublasDrotg_v2 +#define cublasCrotg cublasCrotg_v2 +#define cublasZrotg cublasZrotg_v2 + +#define cublasSrotm cublasSrotm_v2 +#define cublasDrotm cublasDrotm_v2 + +#define cublasSrotmg cublasSrotmg_v2 +#define cublasDrotmg cublasDrotmg_v2 + +/* Blas2 Routines */ + +#define cublasSgemv cublasSgemv_v2 +#define cublasDgemv cublasDgemv_v2 +#define cublasCgemv cublasCgemv_v2 +#define cublasZgemv cublasZgemv_v2 + +#define cublasSgbmv cublasSgbmv_v2 +#define cublasDgbmv cublasDgbmv_v2 +#define cublasCgbmv cublasCgbmv_v2 +#define cublasZgbmv cublasZgbmv_v2 + +#define cublasStrmv cublasStrmv_v2 +#define cublasDtrmv cublasDtrmv_v2 +#define cublasCtrmv cublasCtrmv_v2 +#define cublasZtrmv cublasZtrmv_v2 + +#define cublasStbmv cublasStbmv_v2 +#define cublasDtbmv cublasDtbmv_v2 +#define cublasCtbmv cublasCtbmv_v2 +#define cublasZtbmv cublasZtbmv_v2 + +#define cublasStpmv cublasStpmv_v2 +#define cublasDtpmv cublasDtpmv_v2 +#define cublasCtpmv cublasCtpmv_v2 +#define cublasZtpmv cublasZtpmv_v2 + +#define cublasStrsv cublasStrsv_v2 +#define cublasDtrsv cublasDtrsv_v2 +#define cublasCtrsv 
cublasCtrsv_v2 +#define cublasZtrsv cublasZtrsv_v2 + +#define cublasStpsv cublasStpsv_v2 +#define cublasDtpsv cublasDtpsv_v2 +#define cublasCtpsv cublasCtpsv_v2 +#define cublasZtpsv cublasZtpsv_v2 + +#define cublasStbsv cublasStbsv_v2 +#define cublasDtbsv cublasDtbsv_v2 +#define cublasCtbsv cublasCtbsv_v2 +#define cublasZtbsv cublasZtbsv_v2 + +#define cublasSsymv cublasSsymv_v2 +#define cublasDsymv cublasDsymv_v2 +#define cublasCsymv cublasCsymv_v2 +#define cublasZsymv cublasZsymv_v2 +#define cublasChemv cublasChemv_v2 +#define cublasZhemv cublasZhemv_v2 + +#define cublasSsbmv cublasSsbmv_v2 +#define cublasDsbmv cublasDsbmv_v2 +#define cublasChbmv cublasChbmv_v2 +#define cublasZhbmv cublasZhbmv_v2 + +#define cublasSspmv cublasSspmv_v2 +#define cublasDspmv cublasDspmv_v2 +#define cublasChpmv cublasChpmv_v2 +#define cublasZhpmv cublasZhpmv_v2 + +#define cublasSger cublasSger_v2 +#define cublasDger cublasDger_v2 +#define cublasCgeru cublasCgeru_v2 +#define cublasCgerc cublasCgerc_v2 +#define cublasZgeru cublasZgeru_v2 +#define cublasZgerc cublasZgerc_v2 + +#define cublasSsyr cublasSsyr_v2 +#define cublasDsyr cublasDsyr_v2 +#define cublasCsyr cublasCsyr_v2 +#define cublasZsyr cublasZsyr_v2 +#define cublasCher cublasCher_v2 +#define cublasZher cublasZher_v2 + +#define cublasSspr cublasSspr_v2 +#define cublasDspr cublasDspr_v2 +#define cublasChpr cublasChpr_v2 +#define cublasZhpr cublasZhpr_v2 + +#define cublasSsyr2 cublasSsyr2_v2 +#define cublasDsyr2 cublasDsyr2_v2 +#define cublasCsyr2 cublasCsyr2_v2 +#define cublasZsyr2 cublasZsyr2_v2 +#define cublasCher2 cublasCher2_v2 +#define cublasZher2 cublasZher2_v2 + +#define cublasSspr2 cublasSspr2_v2 +#define cublasDspr2 cublasDspr2_v2 +#define cublasChpr2 cublasChpr2_v2 +#define cublasZhpr2 cublasZhpr2_v2 + +/* Blas3 Routines */ + +#define cublasSgemm cublasSgemm_v2 +#define cublasDgemm cublasDgemm_v2 +#define cublasCgemm cublasCgemm_v2 +#define cublasZgemm cublasZgemm_v2 + +#define cublasSsyrk cublasSsyrk_v2 +#define cublasDsyrk cublasDsyrk_v2 +#define cublasCsyrk cublasCsyrk_v2 +#define cublasZsyrk cublasZsyrk_v2 +#define cublasCherk cublasCherk_v2 +#define cublasZherk cublasZherk_v2 + +#define cublasSsyr2k cublasSsyr2k_v2 +#define cublasDsyr2k cublasDsyr2k_v2 +#define cublasCsyr2k cublasCsyr2k_v2 +#define cublasZsyr2k cublasZsyr2k_v2 +#define cublasCher2k cublasCher2k_v2 +#define cublasZher2k cublasZher2k_v2 + +#define cublasSsymm cublasSsymm_v2 +#define cublasDsymm cublasDsymm_v2 +#define cublasCsymm cublasCsymm_v2 +#define cublasZsymm cublasZsymm_v2 +#define cublasChemm cublasChemm_v2 +#define cublasZhemm cublasZhemm_v2 + +#define cublasStrsm cublasStrsm_v2 +#define cublasDtrsm cublasDtrsm_v2 +#define cublasCtrsm cublasCtrsm_v2 +#define cublasZtrsm cublasZtrsm_v2 + +#define cublasStrmm cublasStrmm_v2 +#define cublasDtrmm cublasDtrmm_v2 +#define cublasCtrmm cublasCtrmm_v2 +#define cublasZtrmm cublasZtrmm_v2 + +/* 64-bit integer */ + +/* Blas1 Routines */ + +#define cublasSnrm2_64 cublasSnrm2_v2_64 +#define cublasDnrm2_64 cublasDnrm2_v2_64 +#define cublasScnrm2_64 cublasScnrm2_v2_64 +#define cublasDznrm2_64 cublasDznrm2_v2_64 + +#define cublasSdot_64 cublasSdot_v2_64 +#define cublasDdot_64 cublasDdot_v2_64 +#define cublasCdotu_64 cublasCdotu_v2_64 +#define cublasCdotc_64 cublasCdotc_v2_64 +#define cublasZdotu_64 cublasZdotu_v2_64 +#define cublasZdotc_64 cublasZdotc_v2_64 + +#define cublasSscal_64 cublasSscal_v2_64 +#define cublasDscal_64 cublasDscal_v2_64 +#define cublasCscal_64 cublasCscal_v2_64 +#define cublasCsscal_64 cublasCsscal_v2_64 
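/*
 * A minimal usage sketch (illustrative, not part of the shipped header): with
 * cublas_v2.h included, the unsuffixed macros above resolve the legacy names
 * to the _v2 symbols, and the *_64 macros resolve to the _v2_64 symbols whose
 * dimension, increment, and leading-dimension arguments are int64_t (these
 * 64-bit entry points assume a CUDA 12+ toolkit). The helper below is a
 * hypothetical example of the 64-bit interface; it assumes d_x already points
 * to n floats in device memory.
 */
#include <cublas_v2.h>
#include <stdint.h>

static inline int scale_device_vector_64(float* d_x, int64_t n) {
    cublasHandle_t handle;
    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { /* expands to cublasCreate_v2 */
        return -1;
    }
    const float alpha = 2.0f; /* host pointer: the default CUBLAS_POINTER_MODE_HOST */
    /* cublasSscal_64 expands to cublasSscal_v2_64; n and incx are int64_t */
    cublasStatus_t st = cublasSscal_64(handle, n, &alpha, d_x, (int64_t)1);
    cublasDestroy(handle); /* expands to cublasDestroy_v2 */
    return (st == CUBLAS_STATUS_SUCCESS) ? 0 : -1;
}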
+#define cublasZscal_64 cublasZscal_v2_64 +#define cublasZdscal_64 cublasZdscal_v2_64 + +#define cublasSaxpy_64 cublasSaxpy_v2_64 +#define cublasDaxpy_64 cublasDaxpy_v2_64 +#define cublasCaxpy_64 cublasCaxpy_v2_64 +#define cublasZaxpy_64 cublasZaxpy_v2_64 + +#define cublasScopy_64 cublasScopy_v2_64 +#define cublasDcopy_64 cublasDcopy_v2_64 +#define cublasCcopy_64 cublasCcopy_v2_64 +#define cublasZcopy_64 cublasZcopy_v2_64 + +#define cublasSswap_64 cublasSswap_v2_64 +#define cublasDswap_64 cublasDswap_v2_64 +#define cublasCswap_64 cublasCswap_v2_64 +#define cublasZswap_64 cublasZswap_v2_64 + +#define cublasIsamax_64 cublasIsamax_v2_64 +#define cublasIdamax_64 cublasIdamax_v2_64 +#define cublasIcamax_64 cublasIcamax_v2_64 +#define cublasIzamax_64 cublasIzamax_v2_64 + +#define cublasIsamin_64 cublasIsamin_v2_64 +#define cublasIdamin_64 cublasIdamin_v2_64 +#define cublasIcamin_64 cublasIcamin_v2_64 +#define cublasIzamin_64 cublasIzamin_v2_64 + +#define cublasSasum_64 cublasSasum_v2_64 +#define cublasDasum_64 cublasDasum_v2_64 +#define cublasScasum_64 cublasScasum_v2_64 +#define cublasDzasum_64 cublasDzasum_v2_64 + +#define cublasSrot_64 cublasSrot_v2_64 +#define cublasDrot_64 cublasDrot_v2_64 +#define cublasCrot_64 cublasCrot_v2_64 +#define cublasCsrot_64 cublasCsrot_v2_64 +#define cublasZrot_64 cublasZrot_v2_64 +#define cublasZdrot_64 cublasZdrot_v2_64 + +#define cublasSrotg_64 cublasSrotg_v2_64 +#define cublasDrotg_64 cublasDrotg_v2_64 +#define cublasCrotg_64 cublasCrotg_v2_64 +#define cublasZrotg_64 cublasZrotg_v2_64 + +#define cublasSrotm_64 cublasSrotm_v2_64 +#define cublasDrotm_64 cublasDrotm_v2_64 + +#define cublasSrotmg_64 cublasSrotmg_v2_64 +#define cublasDrotmg_64 cublasDrotmg_v2_64 + +/* Blas2 Routines */ + +#define cublasSgemv_64 cublasSgemv_v2_64 +#define cublasDgemv_64 cublasDgemv_v2_64 +#define cublasCgemv_64 cublasCgemv_v2_64 +#define cublasZgemv_64 cublasZgemv_v2_64 + +#define cublasSgbmv_64 cublasSgbmv_v2_64 +#define cublasDgbmv_64 cublasDgbmv_v2_64 +#define cublasCgbmv_64 cublasCgbmv_v2_64 +#define cublasZgbmv_64 cublasZgbmv_v2_64 + +#define cublasStrmv_64 cublasStrmv_v2_64 +#define cublasDtrmv_64 cublasDtrmv_v2_64 +#define cublasCtrmv_64 cublasCtrmv_v2_64 +#define cublasZtrmv_64 cublasZtrmv_v2_64 + +#define cublasStbmv_64 cublasStbmv_v2_64 +#define cublasDtbmv_64 cublasDtbmv_v2_64 +#define cublasCtbmv_64 cublasCtbmv_v2_64 +#define cublasZtbmv_64 cublasZtbmv_v2_64 + +#define cublasStpmv_64 cublasStpmv_v2_64 +#define cublasDtpmv_64 cublasDtpmv_v2_64 +#define cublasCtpmv_64 cublasCtpmv_v2_64 +#define cublasZtpmv_64 cublasZtpmv_v2_64 + +#define cublasStrsv_64 cublasStrsv_v2_64 +#define cublasDtrsv_64 cublasDtrsv_v2_64 +#define cublasCtrsv_64 cublasCtrsv_v2_64 +#define cublasZtrsv_64 cublasZtrsv_v2_64 + +#define cublasStpsv_64 cublasStpsv_v2_64 +#define cublasDtpsv_64 cublasDtpsv_v2_64 +#define cublasCtpsv_64 cublasCtpsv_v2_64 +#define cublasZtpsv_64 cublasZtpsv_v2_64 + +#define cublasStbsv_64 cublasStbsv_v2_64 +#define cublasDtbsv_64 cublasDtbsv_v2_64 +#define cublasCtbsv_64 cublasCtbsv_v2_64 +#define cublasZtbsv_64 cublasZtbsv_v2_64 + +#define cublasSsymv_64 cublasSsymv_v2_64 +#define cublasDsymv_64 cublasDsymv_v2_64 +#define cublasCsymv_64 cublasCsymv_v2_64 +#define cublasZsymv_64 cublasZsymv_v2_64 +#define cublasChemv_64 cublasChemv_v2_64 +#define cublasZhemv_64 cublasZhemv_v2_64 + +#define cublasSsbmv_64 cublasSsbmv_v2_64 +#define cublasDsbmv_64 cublasDsbmv_v2_64 +#define cublasChbmv_64 cublasChbmv_v2_64 +#define cublasZhbmv_64 cublasZhbmv_v2_64 + +#define cublasSspmv_64 
cublasSspmv_v2_64 +#define cublasDspmv_64 cublasDspmv_v2_64 +#define cublasChpmv_64 cublasChpmv_v2_64 +#define cublasZhpmv_64 cublasZhpmv_v2_64 + +#define cublasSger_64 cublasSger_v2_64 +#define cublasDger_64 cublasDger_v2_64 +#define cublasCgeru_64 cublasCgeru_v2_64 +#define cublasCgerc_64 cublasCgerc_v2_64 +#define cublasZgeru_64 cublasZgeru_v2_64 +#define cublasZgerc_64 cublasZgerc_v2_64 + +#define cublasSsyr_64 cublasSsyr_v2_64 +#define cublasDsyr_64 cublasDsyr_v2_64 +#define cublasCsyr_64 cublasCsyr_v2_64 +#define cublasZsyr_64 cublasZsyr_v2_64 +#define cublasCher_64 cublasCher_v2_64 +#define cublasZher_64 cublasZher_v2_64 + +#define cublasSspr_64 cublasSspr_v2_64 +#define cublasDspr_64 cublasDspr_v2_64 +#define cublasChpr_64 cublasChpr_v2_64 +#define cublasZhpr_64 cublasZhpr_v2_64 + +#define cublasSsyr2_64 cublasSsyr2_v2_64 +#define cublasDsyr2_64 cublasDsyr2_v2_64 +#define cublasCsyr2_64 cublasCsyr2_v2_64 +#define cublasZsyr2_64 cublasZsyr2_v2_64 +#define cublasCher2_64 cublasCher2_v2_64 +#define cublasZher2_64 cublasZher2_v2_64 + +#define cublasSspr2_64 cublasSspr2_v2_64 +#define cublasDspr2_64 cublasDspr2_v2_64 +#define cublasChpr2_64 cublasChpr2_v2_64 +#define cublasZhpr2_64 cublasZhpr2_v2_64 + +/* Blas3 Routines */ + +#define cublasSgemm_64 cublasSgemm_v2_64 +#define cublasDgemm_64 cublasDgemm_v2_64 +#define cublasCgemm_64 cublasCgemm_v2_64 +#define cublasZgemm_64 cublasZgemm_v2_64 + +#define cublasSsyrk_64 cublasSsyrk_v2_64 +#define cublasDsyrk_64 cublasDsyrk_v2_64 +#define cublasCsyrk_64 cublasCsyrk_v2_64 +#define cublasZsyrk_64 cublasZsyrk_v2_64 +#define cublasCherk_64 cublasCherk_v2_64 +#define cublasZherk_64 cublasZherk_v2_64 + +#define cublasSsyr2k_64 cublasSsyr2k_v2_64 +#define cublasDsyr2k_64 cublasDsyr2k_v2_64 +#define cublasCsyr2k_64 cublasCsyr2k_v2_64 +#define cublasZsyr2k_64 cublasZsyr2k_v2_64 +#define cublasCher2k_64 cublasCher2k_v2_64 +#define cublasZher2k_64 cublasZher2k_v2_64 + +#define cublasSsymm_64 cublasSsymm_v2_64 +#define cublasDsymm_64 cublasDsymm_v2_64 +#define cublasCsymm_64 cublasCsymm_v2_64 +#define cublasZsymm_64 cublasZsymm_v2_64 +#define cublasChemm_64 cublasChemm_v2_64 +#define cublasZhemm_64 cublasZhemm_v2_64 + +#define cublasStrsm_64 cublasStrsm_v2_64 +#define cublasDtrsm_64 cublasDtrsm_v2_64 +#define cublasCtrsm_64 cublasCtrsm_v2_64 +#define cublasZtrsm_64 cublasZtrsm_v2_64 + +#define cublasStrmm_64 cublasStrmm_v2_64 +#define cublasDtrmm_64 cublasDtrmm_v2_64 +#define cublasCtrmm_64 cublasCtrmm_v2_64 +#define cublasZtrmm_64 cublasZtrmm_v2_64 + +#endif /* !defined(CUBLAS_V2_H_) */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h b/venv/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h new file mode 100644 index 0000000000000000000000000000000000000000..29ea9153faf7b3e62a6d53c0be1980ae79c49f51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h @@ -0,0 +1,824 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(NVBLAS_H_) +#define NVBLAS_H_ + +#include "driver_types.h" +#include "cuComplex.h" /* import complex data type */ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* GEMM */ +void sgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void cgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +void sgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void cgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* SYRK */ +void ssyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* beta, + float* c, + const int* ldc); + +void dsyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* beta, + double* c, + const int* ldc); + +void csyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +void ssyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* beta, + float* c, + const int* ldc); + +void dsyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const 
double* beta, + double* c, + const int* ldc); + +void csyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HERK */ +void cherk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const cuComplex* a, + const int* lda, + const float* beta, + cuComplex* c, + const int* ldc); + +void zherk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const cuDoubleComplex* a, + const int* lda, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +void cherk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const cuComplex* a, + const int* lda, + const float* beta, + cuComplex* c, + const int* ldc); + +void zherk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const cuDoubleComplex* a, + const int* lda, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +/* TRSM */ +void strsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +void strsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +/* SYMM */ +void ssymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* 
lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +void ssymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HEMM */ +void chemm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zhemm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HEMM with no underscore*/ +void chemm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zhemm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* SYR2K */ +void ssyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + 
const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* SYR2K with no underscore */ +void ssyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HER2K */ +void cher2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const float* beta, + cuComplex* c, + const int* ldc); + +void zher2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HER2K with no underscore */ +void cher2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const float* beta, + cuComplex* c, + const int* ldc); + +void zher2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +/* TRMM */ +void strmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +void strmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* 
m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* !defined(NVBLAS_H_) */
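The declarations above are the standard Fortran BLAS3 entry points (both the underscored and non-underscored forms) that NVBLAS intercepts and offloads to the GPU. A minimal sketch (not part of the header) of calling dgemm_ from C, assuming libnvblas is linked or preloaded (e.g. LD_PRELOAD=libnvblas.so.12) and an nvblas.conf pointing at a CPU BLAS fallback; matrices are column-major and every argument, including scalars, is passed by pointer:

#include <stdio.h>

/* Fortran-style prototype, matching nvblas.h above */
void dgemm_(const char*, const char*, const int*, const int*, const int*,
            const double*, const double*, const int*, const double*,
            const int*, const double*, double*, const int*);

int main(void)
{
    const int n = 2;
    const double alpha = 1.0, beta = 0.0;
    /* 2x2 column-major matrices: a = [[1,3],[2,4]], b = [[5,7],[6,8]] */
    double a[4] = {1, 2, 3, 4};
    double b[4] = {5, 6, 7, 8};
    double c[4] = {0, 0, 0, 0};
    dgemm_("N", "N", &n, &n, &n, &alpha, a, &n, b, &n, &beta, c, &n);
    printf("c(1,1) = %f\n", c[0]); /* expect 1*5 + 3*6 = 23 */
    return 0;
}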
diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ea8ceacd0a7c9ae281bf32a98c3df7bac6bd21e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..5e74d14a5c1ef3a8e9c42783921cfe0fe86a1236 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f3e1962664d8d93e96a94a9b91883a423c581dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h new file mode 100644 index 0000000000000000000000000000000000000000..b7ea50da7beb2187e77f7606dd70faed0e4b4add --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h @@ -0,0 +1,98 @@ +/* + * Copyright 2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#include <cupti_result.h> + +#if !defined(_CUPTI_OPENACC_H_) +#define _CUPTI_OPENACC_H_ + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__LP64__) +#define CUPTILP64 1 +#elif defined(_WIN64) +#define CUPTILP64 1 +#else +#undef CUPTILP64 +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \brief Initialize OpenACC support + * + * \param profRegister function of type acc_prof_reg as obtained from acc_register_library + * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library + * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library + */ +CUptiResult CUPTIAPI +cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup); + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_OPENACC_H_*/ +
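Per the doc comment, cuptiOpenACCInitialize is meant to be called from an OpenACC profiling library's acc_register_library entry point, forwarding the three registration pointers supplied by the OpenACC runtime. A minimal sketch (not part of the header), with the acc_prof_reg/acc_prof_lookup typing loosened to void* to match the declaration above, and assuming the CUPTI include directory is on the include path:

#include <stdio.h>
#include <cupti_result.h>
#include <Openacc/cupti_openacc.h>

/* Called by the OpenACC runtime when it registers profiling libraries;
   we simply hand the three callbacks through to CUPTI. */
void acc_register_library(void *profRegister, void *profUnregister,
                          void *profLookup)
{
    CUptiResult res = cuptiOpenACCInitialize(profRegister, profUnregister,
                                             profLookup);
    if (res != CUPTI_SUCCESS)
        fprintf(stderr, "cuptiOpenACCInitialize failed (%d)\n", (int)res);
}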
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h new file mode 100644 index 0000000000000000000000000000000000000000..303dd42878fb02774d872c197ccc27b17f2af69e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h @@ -0,0 +1,100 @@ +/* + * Copyright 2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#include <cupti_result.h> +#include "Openmp/omp-tools.h" + +#if !defined(_CUPTI_OPENMP_H_) +#define _CUPTI_OPENMP_H_ + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__LP64__) +#define CUPTILP64 1 +#elif defined(_WIN64) +#define CUPTILP64 1 +#else +#undef CUPTILP64 +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \brief Initialize OpenMP support (deprecated, used before OpenMP 5.0) + * + */ +int CUPTIAPI cuptiOpenMpInitialize(ompt_function_lookup_t ompt_fn_lookup, const char *runtime_version, unsigned int ompt_version); + +/** + * \brief Initialize OpenMP support + * + */ +int CUPTIAPI cuptiOpenMpInitialize_v2(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data); + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_OPENMP_H_*/
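cuptiOpenMpInitialize_v2 has the same signature as an OMPT tool initializer, so an OMPT tool can forward its initialization straight into CUPTI from ompt_start_tool, the standard OpenMP 5.0 tool entry point. A minimal sketch (not part of the header), assuming the CUPTI include directory is on the include path so the Openmp/ headers resolve:

#include <Openmp/omp-tools.h>
#include <Openmp/cupti_openmp.h>

/* Tool initializer: hand the runtime's lookup function through to CUPTI. */
static int my_initialize(ompt_function_lookup_t lookup,
                         int initial_device_num, ompt_data_t *tool_data)
{
    return cuptiOpenMpInitialize_v2(lookup, initial_device_num, tool_data);
}

static void my_finalize(ompt_data_t *tool_data) { (void)tool_data; }

/* Standard OMPT entry point, discovered by the OpenMP runtime at startup. */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version)
{
    static ompt_start_tool_result_t result =
        { my_initialize, my_finalize, { .value = 0 } };
    (void)omp_version; (void)runtime_version;
    return &result;
}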
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h new file mode 100644 index 0000000000000000000000000000000000000000..276967d07e8f8c0f7686e5b3b15151edf2415ae7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h @@ -0,0 +1,1083 @@ +/* + * include/50/omp-tools.h.var + */ + +//===----------------------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.txt for details. +// +//===----------------------------------------------------------------------===// + +#ifndef __OMPT__ +#define __OMPT__ + +/***************************************************************************** + * system include files + *****************************************************************************/ + +#include <stdint.h> +#include <stddef.h> + +/***************************************************************************** + * iteration macros + *****************************************************************************/ + +#define FOREACH_OMPT_INQUIRY_FN(macro) \ + macro (ompt_enumerate_states) \ + macro (ompt_enumerate_mutex_impls) \ + \ + macro (ompt_set_callback) \ + macro (ompt_get_callback) \ + \ + macro (ompt_get_state) \ + \ + macro (ompt_get_parallel_info) \ + macro (ompt_get_task_info) \ + macro (ompt_get_task_memory) \ + macro (ompt_get_thread_data) \ + macro (ompt_get_unique_id) \ + macro (ompt_finalize_tool) \ + \ + macro(ompt_get_num_procs) \ + macro(ompt_get_num_places) \ + macro(ompt_get_place_proc_ids) \ + macro(ompt_get_place_num) \ + macro(ompt_get_partition_place_nums) \ + macro(ompt_get_proc_id) \ + \ + macro(ompt_get_target_info) \ + macro(ompt_get_num_devices) + +#define FOREACH_OMPT_STATE(macro) \ + \ + /* first available state */ \ + macro (ompt_state_undefined, 0x102) /* undefined thread state */ \ + \ + /* work states (0..15) */ \ + macro (ompt_state_work_serial, 0x000) /* working outside parallel */ \ + macro (ompt_state_work_parallel, 0x001) /* working within parallel */ \ + macro (ompt_state_work_reduction, 0x002) /* performing a reduction */ \ + \ + /* barrier wait states (16..31) */ \ + macro (ompt_state_wait_barrier, 0x010) /* waiting at a barrier */ \ + macro (ompt_state_wait_barrier_implicit_parallel, 0x011) \ + /* implicit barrier at the end of parallel region */\ + macro (ompt_state_wait_barrier_implicit_workshare, 0x012) \ + /* implicit barrier at the end of worksharing */ \ + macro (ompt_state_wait_barrier_implicit, 0x013) /* implicit barrier */ \ + macro (ompt_state_wait_barrier_explicit, 0x014) /* explicit barrier */ \ + \ + /* task wait states (32..63) */ \ + macro (ompt_state_wait_taskwait, 0x020) /* waiting at a taskwait */ \ + macro (ompt_state_wait_taskgroup, 0x021) /* waiting at a taskgroup */ \ + \ + /* mutex wait states (64..127) */ \ + macro (ompt_state_wait_mutex, 0x040) \ + macro (ompt_state_wait_lock, 0x041) /* waiting for lock */ \ + macro (ompt_state_wait_critical, 0x042) /* waiting for critical */ \ + macro (ompt_state_wait_atomic, 0x043) /* waiting for atomic */ \ + macro (ompt_state_wait_ordered, 0x044) /* waiting for ordered */ \ + \ + /* target wait states (128..255) */ \ + macro (ompt_state_wait_target, 0x080) /* waiting for target region */ \ + macro (ompt_state_wait_target_map, 0x081) /* waiting for target data mapping operation */ \ + macro (ompt_state_wait_target_update, 0x082) /* waiting for target update operation */ \ + \ + /* misc (256..511) */ \ + macro (ompt_state_idle, 0x100) /* waiting for work */ \ + macro (ompt_state_overhead, 0x101) /* overhead excluding wait states */ \ + \ + /* implementation-specific states (512..) */ + + +#define FOREACH_KMP_MUTEX_IMPL(macro) \ + macro (kmp_mutex_impl_none, 0) /* unknown implementation */ \ + macro (kmp_mutex_impl_spin, 1) /* based on spin */ \ + macro (kmp_mutex_impl_queuing, 2) /* based on some fair policy */ \ + macro (kmp_mutex_impl_speculative, 3) /* based on HW-supported speculation */ + +#define FOREACH_OMPT_EVENT(macro) \ + \ + /*--- Mandatory Events ---*/ \ + macro (ompt_callback_thread_begin, ompt_callback_thread_begin_t, 1) /* thread begin */ \ + macro (ompt_callback_thread_end, ompt_callback_thread_end_t, 2) /* thread end */ \ + \ + macro (ompt_callback_parallel_begin, ompt_callback_parallel_begin_t, 3) /* parallel begin */ \ + macro (ompt_callback_parallel_end, ompt_callback_parallel_end_t, 4) /* parallel end */ \ + \ + macro (ompt_callback_task_create, ompt_callback_task_create_t, 5) /* task begin */ \ + macro (ompt_callback_task_schedule, ompt_callback_task_schedule_t, 6) /* task schedule */ \ + macro (ompt_callback_implicit_task, ompt_callback_implicit_task_t, 7) /* implicit task */ \ + \ + macro (ompt_callback_target, ompt_callback_target_t, 8) /* target */ \ + macro (ompt_callback_target_data_op, ompt_callback_target_data_op_t, 9) /* target data op */ \ + macro (ompt_callback_target_submit, ompt_callback_target_submit_t, 10) /* target submit */ \ + \ + macro (ompt_callback_control_tool, ompt_callback_control_tool_t, 11) /* control tool */ \ + \ + macro (ompt_callback_device_initialize, ompt_callback_device_initialize_t, 12) /* device initialize */ \ + macro (ompt_callback_device_finalize, ompt_callback_device_finalize_t, 13) /* device finalize */ \ + \ + macro (ompt_callback_device_load, ompt_callback_device_load_t, 14) /* device load */ \ + macro (ompt_callback_device_unload, ompt_callback_device_unload_t, 15) /* device unload */ \ + \ + /* Optional Events */ \ + macro (ompt_callback_sync_region_wait, ompt_callback_sync_region_t, 16) /* sync region wait begin or end */ \ + \ + macro (ompt_callback_mutex_released, ompt_callback_mutex_t, 17) /* mutex released */ \ + \ + macro (ompt_callback_dependences, ompt_callback_dependences_t, 18) /* report task dependences */ \ + macro (ompt_callback_task_dependence, ompt_callback_task_dependence_t, 19) /* report task 
dependence */ \ + \ + macro (ompt_callback_work, ompt_callback_work_t, 20) /* task at work begin or end */ \ + \ + macro (ompt_callback_master, ompt_callback_master_t, 21) /* task at master begin or end */ \ + \ + macro (ompt_callback_target_map, ompt_callback_target_map_t, 22) /* target map */ \ + \ + macro (ompt_callback_sync_region, ompt_callback_sync_region_t, 23) /* sync region begin or end */ \ + \ + macro (ompt_callback_lock_init, ompt_callback_mutex_acquire_t, 24) /* lock init */ \ + macro (ompt_callback_lock_destroy, ompt_callback_mutex_t, 25) /* lock destroy */ \ + \ + macro (ompt_callback_mutex_acquire, ompt_callback_mutex_acquire_t, 26) /* mutex acquire */ \ + macro (ompt_callback_mutex_acquired, ompt_callback_mutex_t, 27) /* mutex acquired */ \ + \ + macro (ompt_callback_nest_lock, ompt_callback_nest_lock_t, 28) /* nest lock */ \ + \ + macro (ompt_callback_flush, ompt_callback_flush_t, 29) /* after executing flush */ \ + \ + macro (ompt_callback_cancel, ompt_callback_cancel_t, 30) /* cancel innermost binding region */ \ + \ + macro (ompt_callback_reduction, ompt_callback_sync_region_t, 31) /* reduction */ \ + \ + macro (ompt_callback_dispatch, ompt_callback_dispatch_t, 32) /* dispatch of work */ + +/***************************************************************************** + * implementation specific types + *****************************************************************************/ + +typedef enum kmp_mutex_impl_t { +#define kmp_mutex_impl_macro(impl, code) impl = code, + FOREACH_KMP_MUTEX_IMPL(kmp_mutex_impl_macro) +#undef kmp_mutex_impl_macro +} kmp_mutex_impl_t; + +/***************************************************************************** + * definitions generated from spec + *****************************************************************************/ + +typedef enum ompt_callbacks_t { + ompt_callback_thread_begin = 1, + ompt_callback_thread_end = 2, + ompt_callback_parallel_begin = 3, + ompt_callback_parallel_end = 4, + ompt_callback_task_create = 5, + ompt_callback_task_schedule = 6, + ompt_callback_implicit_task = 7, + ompt_callback_target = 8, + ompt_callback_target_data_op = 9, + ompt_callback_target_submit = 10, + ompt_callback_control_tool = 11, + ompt_callback_device_initialize = 12, + ompt_callback_device_finalize = 13, + ompt_callback_device_load = 14, + ompt_callback_device_unload = 15, + ompt_callback_sync_region_wait = 16, + ompt_callback_mutex_released = 17, + ompt_callback_dependences = 18, + ompt_callback_task_dependence = 19, + ompt_callback_work = 20, + ompt_callback_master = 21, + ompt_callback_target_map = 22, + ompt_callback_sync_region = 23, + ompt_callback_lock_init = 24, + ompt_callback_lock_destroy = 25, + ompt_callback_mutex_acquire = 26, + ompt_callback_mutex_acquired = 27, + ompt_callback_nest_lock = 28, + ompt_callback_flush = 29, + ompt_callback_cancel = 30, + ompt_callback_reduction = 31, + ompt_callback_dispatch = 32 +} ompt_callbacks_t; + +typedef enum ompt_record_t { + ompt_record_ompt = 1, + ompt_record_native = 2, + ompt_record_invalid = 3 +} ompt_record_t; + +typedef enum ompt_record_native_t { + ompt_record_native_info = 1, + ompt_record_native_event = 2 +} ompt_record_native_t; + +typedef enum ompt_set_result_t { + ompt_set_error = 0, + ompt_set_never = 1, + ompt_set_impossible = 2, + ompt_set_sometimes = 3, + ompt_set_sometimes_paired = 4, + ompt_set_always = 5 +} ompt_set_result_t; + +typedef uint64_t ompt_id_t; + +typedef uint64_t ompt_device_time_t; + +typedef uint64_t ompt_buffer_cursor_t; + +typedef enum 
ompt_thread_t { + ompt_thread_initial = 1, + ompt_thread_worker = 2, + ompt_thread_other = 3, + ompt_thread_unknown = 4 +} ompt_thread_t; + +typedef enum ompt_scope_endpoint_t { + ompt_scope_begin = 1, + ompt_scope_end = 2 +} ompt_scope_endpoint_t; + +typedef enum ompt_dispatch_t { + ompt_dispatch_iteration = 1, + ompt_dispatch_section = 2 +} ompt_dispatch_t; + +typedef enum ompt_sync_region_t { + ompt_sync_region_barrier = 1, + ompt_sync_region_barrier_implicit = 2, + ompt_sync_region_barrier_explicit = 3, + ompt_sync_region_barrier_implementation = 4, + ompt_sync_region_taskwait = 5, + ompt_sync_region_taskgroup = 6, + ompt_sync_region_reduction = 7 +} ompt_sync_region_t; + +typedef enum ompt_target_data_op_t { + ompt_target_data_alloc = 1, + ompt_target_data_transfer_to_device = 2, + ompt_target_data_transfer_from_device = 3, + ompt_target_data_delete = 4, + ompt_target_data_associate = 5, + ompt_target_data_disassociate = 6 +} ompt_target_data_op_t; + +typedef enum ompt_work_t { + ompt_work_loop = 1, + ompt_work_sections = 2, + ompt_work_single_executor = 3, + ompt_work_single_other = 4, + ompt_work_workshare = 5, + ompt_work_distribute = 6, + ompt_work_taskloop = 7 +} ompt_work_t; + +typedef enum ompt_mutex_t { + ompt_mutex_lock = 1, + ompt_mutex_test_lock = 2, + ompt_mutex_nest_lock = 3, + ompt_mutex_test_nest_lock = 4, + ompt_mutex_critical = 5, + ompt_mutex_atomic = 6, + ompt_mutex_ordered = 7 +} ompt_mutex_t; + +typedef enum ompt_native_mon_flag_t { + ompt_native_data_motion_explicit = 0x01, + ompt_native_data_motion_implicit = 0x02, + ompt_native_kernel_invocation = 0x04, + ompt_native_kernel_execution = 0x08, + ompt_native_driver = 0x10, + ompt_native_runtime = 0x20, + ompt_native_overhead = 0x40, + ompt_native_idleness = 0x80 +} ompt_native_mon_flag_t; + +typedef enum ompt_task_flag_t { + ompt_task_initial = 0x00000001, + ompt_task_implicit = 0x00000002, + ompt_task_explicit = 0x00000004, + ompt_task_target = 0x00000008, + ompt_task_undeferred = 0x08000000, + ompt_task_untied = 0x10000000, + ompt_task_final = 0x20000000, + ompt_task_mergeable = 0x40000000, + ompt_task_merged = 0x80000000 +} ompt_task_flag_t; + +typedef enum ompt_task_status_t { + ompt_task_complete = 1, + ompt_task_yield = 2, + ompt_task_cancel = 3, + ompt_task_detach = 4, + ompt_task_early_fulfill = 5, + ompt_task_late_fulfill = 6, + ompt_task_switch = 7 +} ompt_task_status_t; + +typedef enum ompt_target_t { + ompt_target = 1, + ompt_target_enter_data = 2, + ompt_target_exit_data = 3, + ompt_target_update = 4 +} ompt_target_t; + +typedef enum ompt_parallel_flag_t { + ompt_parallel_invoker_program = 0x00000001, + ompt_parallel_invoker_runtime = 0x00000002, + ompt_parallel_league = 0x40000000, + ompt_parallel_team = 0x80000000 +} ompt_parallel_flag_t; + +typedef enum ompt_target_map_flag_t { + ompt_target_map_flag_to = 0x01, + ompt_target_map_flag_from = 0x02, + ompt_target_map_flag_alloc = 0x04, + ompt_target_map_flag_release = 0x08, + ompt_target_map_flag_delete = 0x10, + ompt_target_map_flag_implicit = 0x20 +} ompt_target_map_flag_t; + +typedef enum ompt_dependence_type_t { + ompt_dependence_type_in = 1, + ompt_dependence_type_out = 2, + ompt_dependence_type_inout = 3, + ompt_dependence_type_mutexinoutset = 4, + ompt_dependence_type_source = 5, + ompt_dependence_type_sink = 6 +} ompt_dependence_type_t; + +typedef enum ompt_cancel_flag_t { + ompt_cancel_parallel = 0x01, + ompt_cancel_sections = 0x02, + ompt_cancel_loop = 0x04, + ompt_cancel_taskgroup = 0x08, + ompt_cancel_activated = 0x10, + 
ompt_cancel_detected = 0x20, + ompt_cancel_discarded_task = 0x40 +} ompt_cancel_flag_t; + +typedef uint64_t ompt_hwid_t; + +typedef uint64_t ompt_wait_id_t; + +typedef enum ompt_frame_flag_t { + ompt_frame_runtime = 0x00, + ompt_frame_application = 0x01, + ompt_frame_cfa = 0x10, + ompt_frame_framepointer = 0x20, + ompt_frame_stackaddress = 0x30 +} ompt_frame_flag_t; + +typedef enum ompt_state_t { + ompt_state_work_serial = 0x000, + ompt_state_work_parallel = 0x001, + ompt_state_work_reduction = 0x002, + + ompt_state_wait_barrier = 0x010, + ompt_state_wait_barrier_implicit_parallel = 0x011, + ompt_state_wait_barrier_implicit_workshare = 0x012, + ompt_state_wait_barrier_implicit = 0x013, + ompt_state_wait_barrier_explicit = 0x014, + + ompt_state_wait_taskwait = 0x020, + ompt_state_wait_taskgroup = 0x021, + + ompt_state_wait_mutex = 0x040, + ompt_state_wait_lock = 0x041, + ompt_state_wait_critical = 0x042, + ompt_state_wait_atomic = 0x043, + ompt_state_wait_ordered = 0x044, + + ompt_state_wait_target = 0x080, + ompt_state_wait_target_map = 0x081, + ompt_state_wait_target_update = 0x082, + + ompt_state_idle = 0x100, + ompt_state_overhead = 0x101, + ompt_state_undefined = 0x102 +} ompt_state_t; + +typedef uint64_t (*ompt_get_unique_id_t) (void); + +typedef uint64_t ompd_size_t; + +typedef uint64_t ompd_wait_id_t; + +typedef uint64_t ompd_addr_t; +typedef int64_t ompd_word_t; +typedef uint64_t ompd_seg_t; + +typedef uint64_t ompd_device_t; + +typedef uint64_t ompd_thread_id_t; + +typedef enum ompd_scope_t { + ompd_scope_global = 1, + ompd_scope_address_space = 2, + ompd_scope_thread = 3, + ompd_scope_parallel = 4, + ompd_scope_implicit_task = 5, + ompd_scope_task = 6 +} ompd_scope_t; + +typedef uint64_t ompd_icv_id_t; + +typedef enum ompd_rc_t { + ompd_rc_ok = 0, + ompd_rc_unavailable = 1, + ompd_rc_stale_handle = 2, + ompd_rc_bad_input = 3, + ompd_rc_error = 4, + ompd_rc_unsupported = 5, + ompd_rc_needs_state_tracking = 6, + ompd_rc_incompatible = 7, + ompd_rc_device_read_error = 8, + ompd_rc_device_write_error = 9, + ompd_rc_nomem = 10, +} ompd_rc_t; + +typedef void (*ompt_interface_fn_t) (void); + +typedef ompt_interface_fn_t (*ompt_function_lookup_t) ( + const char *interface_function_name +); + +typedef union ompt_data_t { + uint64_t value; + void *ptr; +} ompt_data_t; + +typedef struct ompt_frame_t { + ompt_data_t exit_frame; + ompt_data_t enter_frame; + int exit_frame_flags; + int enter_frame_flags; +} ompt_frame_t; + +typedef void (*ompt_callback_t) (void); + +typedef void ompt_device_t; + +typedef void ompt_buffer_t; + +typedef void (*ompt_callback_buffer_request_t) ( + int device_num, + ompt_buffer_t **buffer, + size_t *bytes +); + +typedef void (*ompt_callback_buffer_complete_t) ( + int device_num, + ompt_buffer_t *buffer, + size_t bytes, + ompt_buffer_cursor_t begin, + int buffer_owned +); + +typedef void (*ompt_finalize_t) ( + ompt_data_t *tool_data +); + +typedef int (*ompt_initialize_t) ( + ompt_function_lookup_t lookup, + int initial_device_num, + ompt_data_t *tool_data +); + +typedef struct ompt_start_tool_result_t { + ompt_initialize_t initialize; + ompt_finalize_t finalize; + ompt_data_t tool_data; +} ompt_start_tool_result_t; + +typedef struct ompt_record_abstract_t { + ompt_record_native_t rclass; + const char *type; + ompt_device_time_t start_time; + ompt_device_time_t end_time; + ompt_hwid_t hwid; +} ompt_record_abstract_t; + +typedef struct ompt_dependence_t { + ompt_data_t variable; + ompt_dependence_type_t dependence_type; +} ompt_dependence_t; + +typedef int 
(*ompt_enumerate_states_t) ( + int current_state, + int *next_state, + const char **next_state_name +); + +typedef int (*ompt_enumerate_mutex_impls_t) ( + int current_impl, + int *next_impl, + const char **next_impl_name +); + +typedef ompt_set_result_t (*ompt_set_callback_t) ( + ompt_callbacks_t event, + ompt_callback_t callback +); + +typedef int (*ompt_get_callback_t) ( + ompt_callbacks_t event, + ompt_callback_t *callback +); + +typedef ompt_data_t *(*ompt_get_thread_data_t) (void); + +typedef int (*ompt_get_num_procs_t) (void); + +typedef int (*ompt_get_num_places_t) (void); + +typedef int (*ompt_get_place_proc_ids_t) ( + int place_num, + int ids_size, + int *ids +); + +typedef int (*ompt_get_place_num_t) (void); + +typedef int (*ompt_get_partition_place_nums_t) ( + int place_nums_size, + int *place_nums +); + +typedef int (*ompt_get_proc_id_t) (void); + +typedef int (*ompt_get_state_t) ( + ompt_wait_id_t *wait_id +); + +typedef int (*ompt_get_parallel_info_t) ( + int ancestor_level, + ompt_data_t **parallel_data, + int *team_size +); + +typedef int (*ompt_get_task_info_t) ( + int ancestor_level, + int *flags, + ompt_data_t **task_data, + ompt_frame_t **task_frame, + ompt_data_t **parallel_data, + int *thread_num +); + +typedef int (*ompt_get_task_memory_t)( + void **addr, + size_t *size, + int block +); + +typedef int (*ompt_get_target_info_t) ( + uint64_t *device_num, + ompt_id_t *target_id, + ompt_id_t *host_op_id +); + +typedef int (*ompt_get_num_devices_t) (void); + +typedef void (*ompt_finalize_tool_t) (void); + +typedef int (*ompt_get_device_num_procs_t) ( + ompt_device_t *device +); + +typedef ompt_device_time_t (*ompt_get_device_time_t) ( + ompt_device_t *device +); + +typedef double (*ompt_translate_time_t) ( + ompt_device_t *device, + ompt_device_time_t time +); + +typedef ompt_set_result_t (*ompt_set_trace_ompt_t) ( + ompt_device_t *device, + unsigned int enable, + unsigned int etype +); + +typedef ompt_set_result_t (*ompt_set_trace_native_t) ( + ompt_device_t *device, + int enable, + int flags +); + +typedef int (*ompt_start_trace_t) ( + ompt_device_t *device, + ompt_callback_buffer_request_t request, + ompt_callback_buffer_complete_t complete +); + +typedef int (*ompt_pause_trace_t) ( + ompt_device_t *device, + int begin_pause +); + +typedef int (*ompt_flush_trace_t) ( + ompt_device_t *device +); + +typedef int (*ompt_stop_trace_t) ( + ompt_device_t *device +); + +typedef int (*ompt_advance_buffer_cursor_t) ( + ompt_device_t *device, + ompt_buffer_t *buffer, + size_t size, + ompt_buffer_cursor_t current, + ompt_buffer_cursor_t *next +); + +typedef ompt_record_t (*ompt_get_record_type_t) ( + ompt_buffer_t *buffer, + ompt_buffer_cursor_t current +); + +typedef void *(*ompt_get_record_native_t) ( + ompt_buffer_t *buffer, + ompt_buffer_cursor_t current, + ompt_id_t *host_op_id +); + +typedef ompt_record_abstract_t * +(*ompt_get_record_abstract_t) ( + void *native_record +); + +typedef void (*ompt_callback_thread_begin_t) ( + ompt_thread_t thread_type, + ompt_data_t *thread_data +); + +typedef struct ompt_record_thread_begin_t { + ompt_thread_t thread_type; +} ompt_record_thread_begin_t; + +typedef void (*ompt_callback_thread_end_t) ( + ompt_data_t *thread_data +); + +typedef void (*ompt_callback_parallel_begin_t) ( + ompt_data_t *encountering_task_data, + const ompt_frame_t *encountering_task_frame, + ompt_data_t *parallel_data, + unsigned int requested_parallelism, + int flags, + const void *codeptr_ra +); + +typedef struct ompt_record_parallel_begin_t { + ompt_id_t 
encountering_task_id; + ompt_id_t parallel_id; + unsigned int requested_parallelism; + int flags; + const void *codeptr_ra; +} ompt_record_parallel_begin_t; + +typedef void (*ompt_callback_parallel_end_t) ( + ompt_data_t *parallel_data, + ompt_data_t *encountering_task_data, + int flags, + const void *codeptr_ra +); + +typedef struct ompt_record_parallel_end_t { + ompt_id_t parallel_id; + ompt_id_t encountering_task_id; + int flags; + const void *codeptr_ra; +} ompt_record_parallel_end_t; + +typedef void (*ompt_callback_work_t) ( + ompt_work_t wstype, + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + uint64_t count, + const void *codeptr_ra +); + +typedef struct ompt_record_work_t { + ompt_work_t wstype; + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + uint64_t count; + const void *codeptr_ra; +} ompt_record_work_t; + +typedef void (*ompt_callback_dispatch_t) ( + ompt_data_t *parallel_data, + ompt_data_t *task_data, + ompt_dispatch_t kind, + ompt_data_t instance +); + +typedef struct ompt_record_dispatch_t { + ompt_id_t parallel_id; + ompt_id_t task_id; + ompt_dispatch_t kind; + ompt_data_t instance; +} ompt_record_dispatch_t; + +typedef void (*ompt_callback_task_create_t) ( + ompt_data_t *encountering_task_data, + const ompt_frame_t *encountering_task_frame, + ompt_data_t *new_task_data, + int flags, + int has_dependences, + const void *codeptr_ra +); + +typedef struct ompt_record_task_create_t { + ompt_id_t encountering_task_id; + ompt_id_t new_task_id; + int flags; + int has_dependences; + const void *codeptr_ra; +} ompt_record_task_create_t; + +typedef void (*ompt_callback_dependences_t) ( + ompt_data_t *task_data, + const ompt_dependence_t *deps, + int ndeps +); + +typedef struct ompt_record_dependences_t { + ompt_id_t task_id; + ompt_dependence_t dep; + int ndeps; +} ompt_record_dependences_t; + +typedef void (*ompt_callback_task_dependence_t) ( + ompt_data_t *src_task_data, + ompt_data_t *sink_task_data +); + +typedef struct ompt_record_task_dependence_t { + ompt_id_t src_task_id; + ompt_id_t sink_task_id; +} ompt_record_task_dependence_t; + +typedef void (*ompt_callback_task_schedule_t) ( + ompt_data_t *prior_task_data, + ompt_task_status_t prior_task_status, + ompt_data_t *next_task_data +); + +typedef struct ompt_record_task_schedule_t { + ompt_id_t prior_task_id; + ompt_task_status_t prior_task_status; + ompt_id_t next_task_id; +} ompt_record_task_schedule_t; + +typedef void (*ompt_callback_implicit_task_t) ( + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + unsigned int actual_parallelism, + unsigned int index, + int flags +); + +typedef struct ompt_record_implicit_task_t { + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + unsigned int actual_parallelism; + unsigned int index; + int flags; +} ompt_record_implicit_task_t; + +typedef void (*ompt_callback_master_t) ( + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + const void *codeptr_ra +); + +typedef struct ompt_record_master_t { + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + const void *codeptr_ra; +} ompt_record_master_t; + +typedef void (*ompt_callback_sync_region_t) ( + ompt_sync_region_t kind, + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + const void *codeptr_ra +); + +typedef struct ompt_record_sync_region_t { + ompt_sync_region_t kind; + 
ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + const void *codeptr_ra; +} ompt_record_sync_region_t; + +typedef void (*ompt_callback_mutex_acquire_t) ( + ompt_mutex_t kind, + unsigned int hint, + unsigned int impl, + ompt_wait_id_t wait_id, + const void *codeptr_ra +); + +typedef struct ompt_record_mutex_acquire_t { + ompt_mutex_t kind; + unsigned int hint; + unsigned int impl; + ompt_wait_id_t wait_id; + const void *codeptr_ra; +} ompt_record_mutex_acquire_t; + +typedef void (*ompt_callback_mutex_t) ( + ompt_mutex_t kind, + ompt_wait_id_t wait_id, + const void *codeptr_ra +); + +typedef struct ompt_record_mutex_t { + ompt_mutex_t kind; + ompt_wait_id_t wait_id; + const void *codeptr_ra; +} ompt_record_mutex_t; + +typedef void (*ompt_callback_nest_lock_t) ( + ompt_scope_endpoint_t endpoint, + ompt_wait_id_t wait_id, + const void *codeptr_ra +); + +typedef struct ompt_record_nest_lock_t { + ompt_scope_endpoint_t endpoint; + ompt_wait_id_t wait_id; + const void *codeptr_ra; +} ompt_record_nest_lock_t; + +typedef void (*ompt_callback_flush_t) ( + ompt_data_t *thread_data, + const void *codeptr_ra +); + +typedef struct ompt_record_flush_t { + const void *codeptr_ra; +} ompt_record_flush_t; + +typedef void (*ompt_callback_cancel_t) ( + ompt_data_t *task_data, + int flags, + const void *codeptr_ra +); + +typedef struct ompt_record_cancel_t { + ompt_id_t task_id; + int flags; + const void *codeptr_ra; +} ompt_record_cancel_t; + +typedef void (*ompt_callback_device_initialize_t) ( + int device_num, + const char *type, + ompt_device_t *device, + ompt_function_lookup_t lookup, + const char *documentation +); + +typedef void (*ompt_callback_device_finalize_t) ( + int device_num +); + +typedef void (*ompt_callback_device_load_t) ( + int device_num, + const char *filename, + int64_t offset_in_file, + void *vma_in_file, + size_t bytes, + void *host_addr, + void *device_addr, + uint64_t module_id +); + +typedef void (*ompt_callback_device_unload_t) ( + int device_num, + uint64_t module_id +); + +typedef void (*ompt_callback_target_data_op_t) ( + ompt_id_t target_id, + ompt_id_t host_op_id, + ompt_target_data_op_t optype, + void *src_addr, + int src_device_num, + void *dest_addr, + int dest_device_num, + size_t bytes, + const void *codeptr_ra +); + +typedef struct ompt_record_target_data_op_t { + ompt_id_t host_op_id; + ompt_target_data_op_t optype; + void *src_addr; + int src_device_num; + void *dest_addr; + int dest_device_num; + size_t bytes; + ompt_device_time_t end_time; + const void *codeptr_ra; +} ompt_record_target_data_op_t; + +typedef void (*ompt_callback_target_t) ( + ompt_target_t kind, + ompt_scope_endpoint_t endpoint, + int device_num, + ompt_data_t *task_data, + ompt_id_t target_id, + const void *codeptr_ra +); + +typedef struct ompt_record_target_t { + ompt_target_t kind; + ompt_scope_endpoint_t endpoint; + int device_num; + ompt_id_t task_id; + ompt_id_t target_id; + const void *codeptr_ra; +} ompt_record_target_t; + +typedef void (*ompt_callback_target_map_t) ( + ompt_id_t target_id, + unsigned int nitems, + void **host_addr, + void **device_addr, + size_t *bytes, + unsigned int *mapping_flags, + const void *codeptr_ra +); + +typedef struct ompt_record_target_map_t { + ompt_id_t target_id; + unsigned int nitems; + void **host_addr; + void **device_addr; + size_t *bytes; + unsigned int *mapping_flags; + const void *codeptr_ra; +} ompt_record_target_map_t; + +typedef void (*ompt_callback_target_submit_t) ( + ompt_id_t target_id, + ompt_id_t host_op_id, + 
unsigned int requested_num_teams +); + +typedef struct ompt_record_target_kernel_t { + ompt_id_t host_op_id; + unsigned int requested_num_teams; + unsigned int granted_num_teams; + ompt_device_time_t end_time; +} ompt_record_target_kernel_t; + +typedef int (*ompt_callback_control_tool_t) ( + uint64_t command, + uint64_t modifier, + void *arg, + const void *codeptr_ra +); + +typedef struct ompt_record_control_tool_t { + uint64_t command; + uint64_t modifier; + const void *codeptr_ra; +} ompt_record_control_tool_t; + +typedef struct ompd_address_t { + ompd_seg_t segment; + ompd_addr_t address; +} ompd_address_t; + +typedef struct ompd_frame_info_t { + ompd_address_t frame_address; + ompd_word_t frame_flag; +} ompd_frame_info_t; + +typedef struct _ompd_aspace_handle ompd_address_space_handle_t; +typedef struct _ompd_thread_handle ompd_thread_handle_t; +typedef struct _ompd_parallel_handle ompd_parallel_handle_t; +typedef struct _ompd_task_handle ompd_task_handle_t; + +typedef struct _ompd_aspace_cont ompd_address_space_context_t; +typedef struct _ompd_thread_cont ompd_thread_context_t; + +typedef struct ompd_device_type_sizes_t { + uint8_t sizeof_char; + uint8_t sizeof_short; + uint8_t sizeof_int; + uint8_t sizeof_long; + uint8_t sizeof_long_long; + uint8_t sizeof_pointer; +} ompd_device_type_sizes_t; + +typedef struct ompt_record_ompt_t { + ompt_callbacks_t type; + ompt_device_time_t time; + ompt_id_t thread_id; + ompt_id_t target_id; + union { + ompt_record_thread_begin_t thread_begin; + ompt_record_parallel_begin_t parallel_begin; + ompt_record_parallel_end_t parallel_end; + ompt_record_work_t work; + ompt_record_dispatch_t dispatch; + ompt_record_task_create_t task_create; + ompt_record_dependences_t dependences; + ompt_record_task_dependence_t task_dependence; + ompt_record_task_schedule_t task_schedule; + ompt_record_implicit_task_t implicit_task; + ompt_record_master_t master; + ompt_record_sync_region_t sync_region; + ompt_record_mutex_acquire_t mutex_acquire; + ompt_record_mutex_t mutex; + ompt_record_nest_lock_t nest_lock; + ompt_record_flush_t flush; + ompt_record_cancel_t cancel; + ompt_record_target_t target; + ompt_record_target_data_op_t target_data_op; + ompt_record_target_map_t target_map; + ompt_record_target_kernel_t target_kernel; + ompt_record_control_tool_t control_tool; + } record; +} ompt_record_ompt_t; + +typedef ompt_record_ompt_t *(*ompt_get_record_ompt_t) ( + ompt_buffer_t *buffer, + ompt_buffer_cursor_t current +); + +#define ompt_id_none 0 +#define ompt_data_none {0} +#define ompt_time_none 0 +#define ompt_hwid_none 0 +#define ompt_addr_none ~0 +#define ompt_mutex_impl_none 0 +#define ompt_wait_id_none 0 + +#define ompd_segment_none 0 + +#endif /* __OMPT__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e81ce2346c7dbdad836146f4d32d6e665359e74 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h 
b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h new file mode 100644 index 0000000000000000000000000000000000000000..8a9814410e4b6fb4f07ad9edc8394e956b77dbcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h @@ -0,0 +1,112 @@ +/* + * Copyright 2009-2017 NVIDIA Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __cuda_stdint_h__ +#define __cuda_stdint_h__ + +// Compiler-specific treatment for C99's stdint.h +// +// By default, this header will use the standard headers (so it +// is your responsibility to make sure they are available), except +// on MSVC before Visual Studio 2010, when they were not provided. +// To support old MSVC, a few of the commonly-used definitions are +// provided here. If more definitions are needed, add them here, +// or replace these definitions with a complete implementation, +// such as the ones available from Google, Boost, or MSVC10. You +// can prevent the definition of any of these types (in order to +// use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED. + +#if !defined(CU_STDINT_TYPES_ALREADY_DEFINED) + +// In VS including stdint.h forces the C++ runtime dep - provide an opt-out +// (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static +// cudart). +#if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H)) + +// These definitions can be used with MSVC 8 and 9, +// which don't ship with stdint.h: + +typedef unsigned char uint8_t; + +typedef short int16_t; +typedef unsigned short uint16_t; + +// To keep it consistent with all MSVC builds, define those types +// in the exact same way they are defined with the MSVC headers +#if defined(_MSC_VER) +typedef signed char int8_t; + +typedef int int32_t; +typedef unsigned int uint32_t; + +typedef long long int64_t; +typedef unsigned long long uint64_t; +#else +typedef char int8_t; + +typedef long int32_t; +typedef unsigned long uint32_t; + +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +#endif + +#elif defined(__DJGPP__) + +// These definitions can be used when compiling +// C code with DJGPP, which only provides stdint.h +// when compiling C++ code with TR1 enabled. + +typedef char int8_t; +typedef unsigned char uint8_t; + +typedef short int16_t; +typedef unsigned short uint16_t; + +typedef long int32_t; +typedef unsigned long uint32_t; + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#else + +// Use standard headers, as specified by C99 and C++ TR1. +// Known to be provided by: +// - gcc/glibc, supported by all versions of glibc +// - djgpp, supported since 2001 +// - MSVC, supported by Visual Studio 2010 and later + +#include <stdint.h> + +#endif + +#endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED) + + +#endif // file guard
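As the header's own comment notes, all of the fallback typedefs can be suppressed when a project supplies its own fixed-width types. A sketch (not part of the header) of the opt-out on an old MSVC toolchain; my_project_stdint.h is a hypothetical project header that must define the full int8_t..uint64_t set, and on any compiler with a real <stdint.h> none of this is needed:

#define CU_STDINT_TYPES_ALREADY_DEFINED
#include "my_project_stdint.h"  /* hypothetical: defines int8_t..uint64_t */
#include "cuda_stdint.h"        /* now contributes no typedefs of its own */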
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(_CUPTI_H_)
+#define _CUPTI_H_
+
+#ifdef _WIN32
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifdef NOMINMAX
+#include <windows.h>
+#else
+#define NOMINMAX
+#include <windows.h>
+#undef NOMINMAX
+#endif
+#endif
+
+#include <cuda.h>
+#include <cupti_result.h>
+#include <cupti_version.h>
+
+/* Activity, callback, event and metric APIs */
+#include <cupti_activity.h>
+#include <cupti_callbacks.h>
+#include <cupti_events.h>
+#include <cupti_metrics.h>
+
+/* Runtime, driver, and nvtx function identifiers */
+#include <cupti_driver_cbid.h>
+#include <cupti_runtime_cbid.h>
+#include <cupti_nvtx_cbid.h>
+
+/* To support function parameter structures for obsoleted API. See
+   cuda.h for the actual definition of these structures. */
+typedef unsigned int CUdeviceptr_v1;
+typedef struct CUDA_MEMCPY2D_v1_st { int dummy; } CUDA_MEMCPY2D_v1;
+typedef struct CUDA_MEMCPY3D_v1_st { int dummy; } CUDA_MEMCPY3D_v1;
+typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY_DESCRIPTOR_v1;
+typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY3D_DESCRIPTOR_v1;
+
+/* Function parameter structures */
+#include <generated_cuda_runtime_api_meta.h>
+#include <generated_cuda_meta.h>
+
+/* The following parameter structures cannot be included unless a
+   header that defines GL_VERSION is included before including them.
+   If these are needed then make sure such a header is included
+   already. */
+#ifdef GL_VERSION
+#include <generated_cuda_gl_interop_meta.h>
+#include <generated_cuda_runtime_api_gl_interop_meta.h>
+#endif
+
+//#include <generated_nvtx_meta.h>
+
+/* The following parameter structures cannot be included by default as
+   they are not guaranteed to be available on all systems. Uncomment
+   the includes that are available, or use the include explicitly. */
+#if defined(__linux__)
+//#include <generated_cuda_vdpau_interop_meta.h>
+//#include <generated_cuda_runtime_api_vdpau_interop_meta.h>
+#endif
+
+#ifdef _WIN32
+//#include <generated_cuda_d3d9_interop_meta.h>
+//#include <generated_cuda_d3d10_interop_meta.h>
+//#include <generated_cuda_d3d11_interop_meta.h>
+//#include <generated_cuda_runtime_api_d3d9_interop_meta.h>
+//#include <generated_cuda_runtime_api_d3d10_interop_meta.h>
+//#include <generated_cuda_runtime_api_d3d11_interop_meta.h>
+#endif
+
+#endif /*_CUPTI_H_*/
+
+
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h
new file mode 100644
index 0000000000000000000000000000000000000000..82cb1b1af9a490996543abd7ad12e7b36ee74dcb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h
@@ -0,0 +1,11604 @@
+/*
+ * Copyright 2011-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(_CUPTI_ACTIVITY_H_)
+#define _CUPTI_ACTIVITY_H_
+
+#include <cuda.h>
+#include <cupti_result.h>
+#include <cupti_version.h>
+#include <cupti_callbacks.h>
+#include <cupti_metrics.h>
+#if defined(CUPTI_DIRECTIVE_SUPPORT)
+#include <cupti_openacc.h>
+#include <cupti_openmp.h>
+#endif
+
+#ifndef CUPTIAPI
+#ifdef _WIN32
+#define CUPTIAPI __stdcall
+#else
+#define CUPTIAPI
+#endif
+#endif
+
+#if defined(__LP64__)
+#define CUPTILP64 1
+#elif defined(_WIN64)
+#define CUPTILP64 1
+#else
+#undef CUPTILP64
+#endif
+
+#define ACTIVITY_RECORD_ALIGNMENT 8
+#if defined(_WIN32) // Windows 32- and 64-bit
+#define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
+#define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
+#define END_PACKED_ALIGNMENT __pragma(pack(pop))
+#elif defined(__GNUC__) // GCC
+#define START_PACKED_ALIGNMENT
+#define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
+#define END_PACKED_ALIGNMENT
+#else // all other compilers
+#define START_PACKED_ALIGNMENT
+#define PACKED_ALIGNMENT
+#define END_PACKED_ALIGNMENT
+#endif
+
+#define CUPTI_UNIFIED_MEMORY_CPU_DEVICE_ID ((uint32_t) 0xFFFFFFFFU)
+#define CUPTI_INVALID_CONTEXT_ID ((uint32_t) 0xFFFFFFFFU)
+#define CUPTI_INVALID_STREAM_ID ((uint32_t) 0xFFFFFFFFU)
+#define CUPTI_INVALID_CHANNEL_ID ((uint32_t) 0xFFFFFFFFU)
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility push(default)
+#endif
+
+#define invalidNumaId ((uint32_t) 0xFFFFFFFF)
+
+/**
+ * \defgroup CUPTI_ACTIVITY_API CUPTI Activity API
+ * Functions, types, and enums that implement the CUPTI Activity API.
+ * @{
+ */
+
+/**
+ * \brief The kinds of activity records.
+ *
+ * Each activity record kind represents information about a GPU or an
+ * activity occurring on a CPU or GPU.
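+ *
+ * (Editorial illustration; this sketch is not part of the original
+ * header. A CUPTI client typically registers buffer callbacks and then
+ * enables the record kinds it wants to collect; bufferRequested and
+ * bufferCompleted are hypothetical client-provided callback names.)
+ * \code
+ *   cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted);
+ *   cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL);
+ * \endcode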
Each kind is associated with a + * activity record structure that holds the information associated + * with the kind. + * \see CUpti_Activity + * \see CUpti_ActivityAPI + * \see CUpti_ActivityContext + * \see CUpti_ActivityDevice + * \see CUpti_ActivityDevice2 + * \see CUpti_ActivityDevice3 + * \see CUpti_ActivityDevice4 + * \see CUpti_ActivityDeviceAttribute + * \see CUpti_ActivityEvent + * \see CUpti_ActivityEventInstance + * \see CUpti_ActivityKernel + * \see CUpti_ActivityKernel2 + * \see CUpti_ActivityKernel3 + * \see CUpti_ActivityKernel4 + * \see CUpti_ActivityKernel5 + * \see CUpti_ActivityKernel6 + * \see CUpti_ActivityKernel7 + * \see CUpti_ActivityKernel8 + * \see CUpti_ActivityKernel9 + * \see CUpti_ActivityCdpKernel + * \see CUpti_ActivityPreemption + * \see CUpti_ActivityMemcpy + * \see CUpti_ActivityMemcpy3 + * \see CUpti_ActivityMemcpy4 + * \see CUpti_ActivityMemcpy5 + * \see CUpti_ActivityMemcpyPtoP + * \see CUpti_ActivityMemcpyPtoP2 + * \see CUpti_ActivityMemcpyPtoP3 + * \see CUpti_ActivityMemcpyPtoP4 + * \see CUpti_ActivityMemset + * \see CUpti_ActivityMemset2 + * \see CUpti_ActivityMemset3 + * \see CUpti_ActivityMemset4 + * \see CUpti_ActivityMetric + * \see CUpti_ActivityMetricInstance + * \see CUpti_ActivityName + * \see CUpti_ActivityMarker + * \see CUpti_ActivityMarker2 + * \see CUpti_ActivityMarkerData + * \see CUpti_ActivitySourceLocator + * \see CUpti_ActivityGlobalAccess + * \see CUpti_ActivityGlobalAccess2 + * \see CUpti_ActivityGlobalAccess3 + * \see CUpti_ActivityBranch + * \see CUpti_ActivityBranch2 + * \see CUpti_ActivityOverhead + * \see CUpti_ActivityEnvironment + * \see CUpti_ActivityInstructionExecution + * \see CUpti_ActivityUnifiedMemoryCounter + * \see CUpti_ActivityFunction + * \see CUpti_ActivityModule + * \see CUpti_ActivitySharedAccess + * \see CUpti_ActivityPCSampling + * \see CUpti_ActivityPCSampling2 + * \see CUpti_ActivityPCSampling3 + * \see CUpti_ActivityPCSamplingRecordInfo + * \see CUpti_ActivityCudaEvent + * \see CUpti_ActivityStream + * \see CUpti_ActivitySynchronization + * \see CUpti_ActivityInstructionCorrelation + * \see CUpti_ActivityExternalCorrelation + * \see CUpti_ActivityUnifiedMemoryCounter2 + * \see CUpti_ActivityOpenAccData + * \see CUpti_ActivityOpenAccLaunch + * \see CUpti_ActivityOpenAccOther + * \see CUpti_ActivityOpenMp + * \see CUpti_ActivityNvLink + * \see CUpti_ActivityNvLink2 + * \see CUpti_ActivityNvLink3 + * \see CUpti_ActivityNvLink4 + * \see CUpti_ActivityMemory + * \see CUpti_ActivityPcie + */ +typedef enum { + /** + * The activity record is invalid. + */ + CUPTI_ACTIVITY_KIND_INVALID = 0, + + /** + * A host<->host, host<->device, or device<->device memory copy. The + * corresponding activity record structure is \ref + * CUpti_ActivityMemcpy5. + */ + CUPTI_ACTIVITY_KIND_MEMCPY = 1, + + /** + * A memory set executing on the GPU. The corresponding activity + * record structure is \ref CUpti_ActivityMemset4. + */ + CUPTI_ACTIVITY_KIND_MEMSET = 2, + + /** + * A kernel executing on the GPU. This activity kind may significantly change + * the overall performance characteristics of the application because all + * kernel executions are serialized on the GPU. Other activity kind for kernel + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL doesn't break kernel concurrency. + * The corresponding activity record structure is \ref CUpti_ActivityKernel9. + */ + CUPTI_ACTIVITY_KIND_KERNEL = 3, + + /** + * A CUDA driver API function execution. The corresponding activity + * record structure is \ref CUpti_ActivityAPI. 
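+ *
+ * (Editorial illustration, not part of the original header: a consumer
+ * casts the generic record to read the callback and correlation IDs;
+ * `record` is a hypothetical CUpti_Activity pointer obtained from a
+ * completed buffer.)
+ * \code
+ *   CUpti_ActivityAPI *api = (CUpti_ActivityAPI *)record;
+ *   printf("driver cbid %u, correlation %u\n", api->cbid, api->correlationId);
+ * \endcode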
+ */ + CUPTI_ACTIVITY_KIND_DRIVER = 4, + + /** + * A CUDA runtime API function execution. The corresponding activity + * record structure is \ref CUpti_ActivityAPI. + */ + CUPTI_ACTIVITY_KIND_RUNTIME = 5, + + /** + * An event value. The corresponding activity record structure is + * \ref CUpti_ActivityEvent. + */ + CUPTI_ACTIVITY_KIND_EVENT = 6, + + /** + * A metric value. The corresponding activity record structure is + * \ref CUpti_ActivityMetric. + */ + CUPTI_ACTIVITY_KIND_METRIC = 7, + + /** + * Information about a device. The corresponding activity record + * structure is \ref CUpti_ActivityDevice4. + */ + CUPTI_ACTIVITY_KIND_DEVICE = 8, + + /** + * Information about a context. The corresponding activity record + * structure is \ref CUpti_ActivityContext. + */ + CUPTI_ACTIVITY_KIND_CONTEXT = 9, + + /** + * A kernel executing on the GPU. This activity kind doesn't break + * kernel concurrency. The corresponding activity record structure + * is \ref CUpti_ActivityKernel9. + */ + CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL = 10, + + /** + * Resource naming done via NVTX APIs for thread, device, context, etc. + * The corresponding activity record structure is \ref CUpti_ActivityName. + */ + CUPTI_ACTIVITY_KIND_NAME = 11, + + /** + * Instantaneous, start, or end NVTX marker. The corresponding activity + * record structure is \ref CUpti_ActivityMarker2. + */ + CUPTI_ACTIVITY_KIND_MARKER = 12, + + /** + * Extended, optional, data about a marker. The corresponding + * activity record structure is \ref CUpti_ActivityMarkerData. + */ + CUPTI_ACTIVITY_KIND_MARKER_DATA = 13, + + /** + * Source information about source level result. The corresponding + * activity record structure is \ref CUpti_ActivitySourceLocator. + */ + CUPTI_ACTIVITY_KIND_SOURCE_LOCATOR = 14, + + /** + * Results for source-level global acccess. The + * corresponding activity record structure is \ref + * CUpti_ActivityGlobalAccess3. + */ + CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS = 15, + + /** + * Results for source-level branch. The corresponding + * activity record structure is \ref CUpti_ActivityBranch2. + */ + CUPTI_ACTIVITY_KIND_BRANCH = 16, + + /** + * Overhead activity records. The + * corresponding activity record structure is + * \ref CUpti_ActivityOverhead. + */ + CUPTI_ACTIVITY_KIND_OVERHEAD = 17, + + /** + * A CDP (CUDA Dynamic Parallel) kernel executing on the GPU. The + * corresponding activity record structure is \ref + * CUpti_ActivityCdpKernel. This activity can not be directly + * enabled or disabled. It is enabled and disabled through + * concurrent kernel activity i.e. _CONCURRENT_KERNEL. + */ + CUPTI_ACTIVITY_KIND_CDP_KERNEL = 18, + /** + * Preemption activity record indicating a preemption of a CDP (CUDA + * Dynamic Parallel) kernel executing on the GPU. The corresponding + * activity record structure is \ref CUpti_ActivityPreemption. + */ + CUPTI_ACTIVITY_KIND_PREEMPTION = 19, + + /** + * Environment activity records indicating power, clock, thermal, + * etc. levels of the GPU. The corresponding activity record + * structure is \ref CUpti_ActivityEnvironment. + */ + CUPTI_ACTIVITY_KIND_ENVIRONMENT = 20, + + /** + * An event value associated with a specific event domain + * instance. The corresponding activity record structure is \ref + * CUpti_ActivityEventInstance. + */ + CUPTI_ACTIVITY_KIND_EVENT_INSTANCE = 21, + + /** + * A peer to peer memory copy. The corresponding activity record + * structure is \ref CUpti_ActivityMemcpyPtoP4. 
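+ *
+ * (Editorial illustration, not part of the original header: dispatch on
+ * the generic record's kind before casting; `record` is a hypothetical
+ * CUpti_Activity pointer.)
+ * \code
+ *   if (record->kind == CUPTI_ACTIVITY_KIND_MEMCPY2) {
+ *     CUpti_ActivityMemcpyPtoP4 *p2p = (CUpti_ActivityMemcpyPtoP4 *)record;
+ *   }
+ * \endcode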
+ */ + CUPTI_ACTIVITY_KIND_MEMCPY2 = 22, + + /** + * A metric value associated with a specific metric domain + * instance. The corresponding activity record structure is \ref + * CUpti_ActivityMetricInstance. + */ + CUPTI_ACTIVITY_KIND_METRIC_INSTANCE = 23, + + /** + * Results for source-level instruction execution. + * The corresponding activity record structure is \ref + * CUpti_ActivityInstructionExecution. + */ + CUPTI_ACTIVITY_KIND_INSTRUCTION_EXECUTION = 24, + + /** + * Unified Memory counter record. The corresponding activity + * record structure is \ref CUpti_ActivityUnifiedMemoryCounter2. + */ + CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER = 25, + + /** + * Device global/function record. The corresponding activity + * record structure is \ref CUpti_ActivityFunction. + */ + CUPTI_ACTIVITY_KIND_FUNCTION = 26, + + /** + * CUDA Module record. The corresponding activity + * record structure is \ref CUpti_ActivityModule. + */ + CUPTI_ACTIVITY_KIND_MODULE = 27, + + /** + * A device attribute value. The corresponding activity record + * structure is \ref CUpti_ActivityDeviceAttribute. + */ + CUPTI_ACTIVITY_KIND_DEVICE_ATTRIBUTE = 28, + + /** + * Results for source-level shared acccess. The + * corresponding activity record structure is \ref + * CUpti_ActivitySharedAccess. + */ + CUPTI_ACTIVITY_KIND_SHARED_ACCESS = 29, + + /** + * Enable PC sampling for kernels. This will serialize + * kernels. The corresponding activity record structure + * is \ref CUpti_ActivityPCSampling3. + */ + CUPTI_ACTIVITY_KIND_PC_SAMPLING = 30, + + /** + * Summary information about PC sampling records. The + * corresponding activity record structure is \ref + * CUpti_ActivityPCSamplingRecordInfo. + */ + CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO = 31, + + /** + * SASS/Source line-by-line correlation record. + * This will generate sass/source correlation for functions that have source + * level analysis or pc sampling results. The records will be generated only + * when either of source level analysis or pc sampling activity is enabled. + * The corresponding activity record structure is \ref + * CUpti_ActivityInstructionCorrelation. + */ + CUPTI_ACTIVITY_KIND_INSTRUCTION_CORRELATION = 32, + + /** + * OpenACC data events. + * The corresponding activity record structure is \ref + * CUpti_ActivityOpenAccData. + */ + CUPTI_ACTIVITY_KIND_OPENACC_DATA = 33, + + /** + * OpenACC launch events. + * The corresponding activity record structure is \ref + * CUpti_ActivityOpenAccLaunch. + */ + CUPTI_ACTIVITY_KIND_OPENACC_LAUNCH = 34, + + /** + * OpenACC other events. + * The corresponding activity record structure is \ref + * CUpti_ActivityOpenAccOther. + */ + CUPTI_ACTIVITY_KIND_OPENACC_OTHER = 35, + + /** + * Information about a CUDA event. The + * corresponding activity record structure is \ref + * CUpti_ActivityCudaEvent. + */ + CUPTI_ACTIVITY_KIND_CUDA_EVENT = 36, + + /** + * Information about a CUDA stream. The + * corresponding activity record structure is \ref + * CUpti_ActivityStream. + */ + CUPTI_ACTIVITY_KIND_STREAM = 37, + + /** + * Records for synchronization management. The + * corresponding activity record structure is \ref + * CUpti_ActivitySynchronization. + */ + CUPTI_ACTIVITY_KIND_SYNCHRONIZATION = 38, + + /** + * Records for correlation of different programming APIs. The + * corresponding activity record structure is \ref + * CUpti_ActivityExternalCorrelation. + */ + CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION = 39, + + /** + * NVLink information. 
+ * The corresponding activity record structure is \ref + * CUpti_ActivityNvLink4. + */ + CUPTI_ACTIVITY_KIND_NVLINK = 40, + + /** + * Instantaneous Event information. + * The corresponding activity record structure is \ref + * CUpti_ActivityInstantaneousEvent. + */ + CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT = 41, + + /** + * Instantaneous Event information for a specific event + * domain instance. + * The corresponding activity record structure is \ref + * CUpti_ActivityInstantaneousEventInstance + */ + CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT_INSTANCE = 42, + + /** + * Instantaneous Metric information + * The corresponding activity record structure is \ref + * CUpti_ActivityInstantaneousMetric. + */ + CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC = 43, + + /** + * Instantaneous Metric information for a specific metric + * domain instance. + * The corresponding activity record structure is \ref + * CUpti_ActivityInstantaneousMetricInstance. + */ + CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC_INSTANCE = 44, + + /** + * Memory activity tracking allocation and freeing of the memory + * The corresponding activity record structure is \ref + * CUpti_ActivityMemory. + */ + CUPTI_ACTIVITY_KIND_MEMORY = 45, + + /** + * PCI devices information used for PCI topology. + * The corresponding activity record structure is \ref + * CUpti_ActivityPcie. + */ + CUPTI_ACTIVITY_KIND_PCIE = 46, + + /** + * OpenMP parallel events. + * The corresponding activity record structure is \ref + * CUpti_ActivityOpenMp. + */ + CUPTI_ACTIVITY_KIND_OPENMP = 47, + + /** + * A CUDA driver kernel launch occurring outside of any + * public API function execution. Tools can handle these + * like records for driver API launch functions, although + * the cbid field is not used here. + * The corresponding activity record structure is \ref + * CUpti_ActivityAPI. + */ + CUPTI_ACTIVITY_KIND_INTERNAL_LAUNCH_API = 48, + + /** + * Memory activity tracking allocation and freeing of the memory + * The corresponding activity record structure is \ref + * CUpti_ActivityMemory3. + */ + CUPTI_ACTIVITY_KIND_MEMORY2 = 49, + + /** + * Memory pool activity tracking creation, destruction and + * triming of the memory pool. + * The corresponding activity record structure is \ref + * CUpti_ActivityMemoryPool2. + */ + CUPTI_ACTIVITY_KIND_MEMORY_POOL = 50, + + /** + * The corresponding activity record structure is + * \ref CUpti_ActivityGraphTrace. + */ + CUPTI_ACTIVITY_KIND_GRAPH_TRACE = 51, + + /** + * JIT operation tracking + * The corresponding activity record structure is \ref + * CUpti_ActivityJit. + */ + CUPTI_ACTIVITY_KIND_JIT = 52, + + CUPTI_ACTIVITY_KIND_COUNT, + + CUPTI_ACTIVITY_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityKind; + +/** + * \brief The kinds of activity objects. + * \see CUpti_ActivityObjectKindId + */ +typedef enum { + /** + * The object kind is not known. + */ + CUPTI_ACTIVITY_OBJECT_UNKNOWN = 0, + + /** + * A process. + */ + CUPTI_ACTIVITY_OBJECT_PROCESS = 1, + + /** + * A thread. + */ + CUPTI_ACTIVITY_OBJECT_THREAD = 2, + + /** + * A device. + */ + CUPTI_ACTIVITY_OBJECT_DEVICE = 3, + + /** + * A context. + */ + CUPTI_ACTIVITY_OBJECT_CONTEXT = 4, + + /** + * A stream. + */ + CUPTI_ACTIVITY_OBJECT_STREAM = 5, + + CUPTI_ACTIVITY_OBJECT_FORCE_INT = 0x7fffffff +} CUpti_ActivityObjectKind; + +/** + * \brief Identifiers for object kinds as specified by + * CUpti_ActivityObjectKind. + * \see CUpti_ActivityObjectKind + */ +typedef union { + /** + * A process object requires that we identify the process ID. 
A + * thread object requires that we identify both the process and + * thread ID. + */ + struct { + uint32_t processId; + uint32_t threadId; + } pt; + + /** + * A device object requires that we identify the device ID. A + * context object requires that we identify both the device and + * context ID. A stream object requires that we identify device, + * context, and stream ID. + */ + struct { + uint32_t deviceId; + uint32_t contextId; + uint32_t streamId; + } dcs; +} CUpti_ActivityObjectKindId; + +/** + * \brief The kinds of activity overhead. + */ +typedef enum { + /** + * The overhead kind is not known. + */ + CUPTI_ACTIVITY_OVERHEAD_UNKNOWN = 0, + + /** + * Compiler overhead. + */ + CUPTI_ACTIVITY_OVERHEAD_DRIVER_COMPILER = 1, + + /** + * Activity buffer flush overhead. + */ + CUPTI_ACTIVITY_OVERHEAD_CUPTI_BUFFER_FLUSH = 1<<16, + + /** + * CUPTI instrumentation overhead. + */ + CUPTI_ACTIVITY_OVERHEAD_CUPTI_INSTRUMENTATION = 2<<16, + + /** + * CUPTI resource creation and destruction overhead. + */ + CUPTI_ACTIVITY_OVERHEAD_CUPTI_RESOURCE = 3<<16, + CUPTI_ACTIVITY_OVERHEAD_FORCE_INT = 0x7fffffff +} CUpti_ActivityOverheadKind; + +/** + * \brief The kind of a compute API. + */ +typedef enum { + /** + * The compute API is not known. + */ + CUPTI_ACTIVITY_COMPUTE_API_UNKNOWN = 0, + + /** + * The compute APIs are for CUDA. + */ + CUPTI_ACTIVITY_COMPUTE_API_CUDA = 1, + + /** + * The compute APIs are for CUDA running + * in MPS (Multi-Process Service) environment. + */ + CUPTI_ACTIVITY_COMPUTE_API_CUDA_MPS = 2, + + CUPTI_ACTIVITY_COMPUTE_API_FORCE_INT = 0x7fffffff +} CUpti_ActivityComputeApiKind; + +/** + * \brief Flags associated with activity records. + * + * Activity record flags. Flags can be combined by bitwise OR to + * associated multiple flags with an activity record. Each flag is + * specific to a certain activity kind, as noted below. + */ +typedef enum { + /** + * Indicates the activity record has no flags. + */ + CUPTI_ACTIVITY_FLAG_NONE = 0, + + /** + * Indicates the activity represents a device that supports + * concurrent kernel execution. Valid for + * CUPTI_ACTIVITY_KIND_DEVICE. + */ + CUPTI_ACTIVITY_FLAG_DEVICE_CONCURRENT_KERNELS = 1 << 0, + + /** + * Indicates if the activity represents a CUdevice_attribute value + * or a CUpti_DeviceAttribute value. Valid for + * CUPTI_ACTIVITY_KIND_DEVICE_ATTRIBUTE. + */ + CUPTI_ACTIVITY_FLAG_DEVICE_ATTRIBUTE_CUDEVICE = 1 << 0, + + /** + * Indicates the activity represents an asynchronous memcpy + * operation. Valid for CUPTI_ACTIVITY_KIND_MEMCPY. + */ + CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC = 1 << 0, + + /** + * Indicates the activity represents an instantaneous marker. Valid + * for CUPTI_ACTIVITY_KIND_MARKER. + */ + CUPTI_ACTIVITY_FLAG_MARKER_INSTANTANEOUS = 1 << 0, + + /** + * Indicates the activity represents a region start marker. Valid + * for CUPTI_ACTIVITY_KIND_MARKER. + */ + CUPTI_ACTIVITY_FLAG_MARKER_START = 1 << 1, + + /** + * Indicates the activity represents a region end marker. Valid for + * CUPTI_ACTIVITY_KIND_MARKER. + */ + CUPTI_ACTIVITY_FLAG_MARKER_END = 1 << 2, + + /** + * Indicates the activity represents an attempt to acquire a user + * defined synchronization object. + * Valid for CUPTI_ACTIVITY_KIND_MARKER. + */ + CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE = 1 << 3, + + /** + * Indicates the activity represents success in acquiring the + * user defined synchronization object. + * Valid for CUPTI_ACTIVITY_KIND_MARKER. 
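+ *
+ * (Editorial illustration, not part of the original header: flags
+ * combine by bitwise OR, so a consumer tests individual bits; `marker`
+ * is a hypothetical CUpti_ActivityMarker2 pointer.)
+ * \code
+ *   if (marker->flags & CUPTI_ACTIVITY_FLAG_MARKER_START) {
+ *     // region-begin marker
+ *   }
+ * \endcode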
+ */ + CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE_SUCCESS = 1 << 4, + + /** + * Indicates the activity represents failure in acquiring the + * user defined synchronization object. + * Valid for CUPTI_ACTIVITY_KIND_MARKER. + */ + CUPTI_ACTIVITY_FLAG_MARKER_SYNC_ACQUIRE_FAILED = 1 << 5, + + /** + * Indicates the activity represents releasing a reservation on + * user defined synchronization object. + * Valid for CUPTI_ACTIVITY_KIND_MARKER. + */ + CUPTI_ACTIVITY_FLAG_MARKER_SYNC_RELEASE = 1 << 6, + + /** + * Indicates the activity represents a marker that does not specify + * a color. Valid for CUPTI_ACTIVITY_KIND_MARKER_DATA. + */ + CUPTI_ACTIVITY_FLAG_MARKER_COLOR_NONE = 1 << 0, + + /** + * Indicates the activity represents a marker that specifies a color + * in alpha-red-green-blue format. Valid for + * CUPTI_ACTIVITY_KIND_MARKER_DATA. + */ + CUPTI_ACTIVITY_FLAG_MARKER_COLOR_ARGB = 1 << 1, + + /** + * The number of bytes requested by each thread + * Valid for CUpti_ActivityGlobalAccess3. + */ + CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_SIZE_MASK = 0xFF << 0, + + /** + * If bit in this flag is set, the access was load, else it is a + * store access. Valid for CUpti_ActivityGlobalAccess3. + */ + CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_LOAD = 1 << 8, + + /** + * If this bit in flag is set, the load access was cached else it is + * uncached. Valid for CUpti_ActivityGlobalAccess3. + */ + CUPTI_ACTIVITY_FLAG_GLOBAL_ACCESS_KIND_CACHED = 1 << 9, + + /** + * If this bit in flag is set, the metric value overflowed. Valid + * for CUpti_ActivityMetric and CUpti_ActivityMetricInstance. + */ + CUPTI_ACTIVITY_FLAG_METRIC_OVERFLOWED = 1 << 0, + + /** + * If this bit in flag is set, the metric value couldn't be + * calculated. This occurs when a value(s) required to calculate the + * metric is missing. Valid for CUpti_ActivityMetric and + * CUpti_ActivityMetricInstance. + */ + CUPTI_ACTIVITY_FLAG_METRIC_VALUE_INVALID = 1 << 1, + + /** + * If this bit in flag is set, the source level metric value couldn't be + * calculated. This occurs when a value(s) required to calculate the + * source level metric cannot be evaluated. + * Valid for CUpti_ActivityInstructionExecution. + */ + CUPTI_ACTIVITY_FLAG_INSTRUCTION_VALUE_INVALID = 1 << 0, + + /** + * The mask for the instruction class, \ref CUpti_ActivityInstructionClass + * Valid for CUpti_ActivityInstructionExecution and + * CUpti_ActivityInstructionCorrelation + */ + CUPTI_ACTIVITY_FLAG_INSTRUCTION_CLASS_MASK = 0xFF << 1, + + /** + * When calling cuptiActivityFlushAll, this flag + * can be set to force CUPTI to flush all records in the buffer, whether + * finished or not + */ + CUPTI_ACTIVITY_FLAG_FLUSH_FORCED = 1 << 0, + + /** + * The number of bytes requested by each thread + * Valid for CUpti_ActivitySharedAccess. + */ + CUPTI_ACTIVITY_FLAG_SHARED_ACCESS_KIND_SIZE_MASK = 0xFF << 0, + + /** + * If bit in this flag is set, the access was load, else it is a + * store access. Valid for CUpti_ActivitySharedAccess. + */ + CUPTI_ACTIVITY_FLAG_SHARED_ACCESS_KIND_LOAD = 1 << 8, + + /** + * Indicates the activity represents an asynchronous memset + * operation. Valid for CUPTI_ACTIVITY_KIND_MEMSET. + */ + CUPTI_ACTIVITY_FLAG_MEMSET_ASYNC = 1 << 0, + + /** + * Indicates the activity represents thrashing in CPU. + * Valid for counter of kind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING in + * CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER + */ + CUPTI_ACTIVITY_FLAG_THRASHING_IN_CPU = 1 << 0, + + /** + * Indicates the activity represents page throttling in CPU. 
+   * Valid for counter of kind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING in
+   * CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER
+   */
+  CUPTI_ACTIVITY_FLAG_THROTTLING_IN_CPU = 1 << 0,
+
+  CUPTI_ACTIVITY_FLAG_FORCE_INT = 0x7fffffff
+} CUpti_ActivityFlag;
+
+/**
+ * \brief The stall reason for PC sampling activity.
+ */
+typedef enum {
+  /**
+   * Invalid reason
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_INVALID = 0,
+
+  /**
+   * No stall, instruction is selected for issue
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_NONE = 1,
+
+  /**
+   * Warp is blocked because next instruction is not yet available,
+   * because of instruction cache miss, or because of branching effects
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_INST_FETCH = 2,
+
+  /**
+   * Instruction is waiting on an arithmetic dependency
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_EXEC_DEPENDENCY = 3,
+
+  /**
+   * Warp is blocked because it is waiting for a memory access to complete.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_MEMORY_DEPENDENCY = 4,
+
+  /**
+   * Texture sub-system is fully utilized or has too many outstanding requests.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_TEXTURE = 5,
+
+  /**
+   * Warp is blocked as it is waiting at __syncthreads() or at memory barrier.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_SYNC = 6,
+
+  /**
+   * Warp is blocked waiting for __constant__ memory and immediate memory access to complete.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_CONSTANT_MEMORY_DEPENDENCY = 7,
+
+  /**
+   * Compute operation cannot be performed due to the required resources not
+   * being available.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_PIPE_BUSY = 8,
+
+  /**
+   * Warp is blocked because there are too many pending memory operations.
+   * On the Kepler architecture this often indicates a high number of memory replays.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_MEMORY_THROTTLE = 9,
+
+  /**
+   * Warp was ready to issue, but some other warp issued instead.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_NOT_SELECTED = 10,
+
+  /**
+   * Miscellaneous reasons
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_OTHER = 11,
+
+  /**
+   * Sleeping.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_SLEEPING = 12,
+
+  CUPTI_ACTIVITY_PC_SAMPLING_STALL_FORCE_INT = 0x7fffffff
+} CUpti_ActivityPCSamplingStallReason;
+
+/**
+ * \brief Sampling period for PC sampling method
+ *
+ * Sampling period can be set using \ref cuptiActivityConfigurePCSampling
+ */
+typedef enum {
+  /**
+   * The PC sampling period is not set.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_INVALID = 0,
+
+  /**
+   * Minimum sampling period available on the device.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MIN = 1,
+
+  /**
+   * Sampling period in lower range.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_LOW = 2,
+
+  /**
+   * Medium sampling period.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MID = 3,
+
+  /**
+   * Sampling period in higher range.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_HIGH = 4,
+
+  /**
+   * Maximum sampling period available on the device.
+   */
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_MAX = 5,
+
+  CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_FORCE_INT = 0x7fffffff
+} CUpti_ActivityPCSamplingPeriod;
+
+/**
+ * \brief The kind of a memory copy, indicating the source and
+ * destination targets of the copy.
+ *
+ * Each kind represents the source and destination targets of a memory
+ * copy. Targets are host, device, and array.
+ */
+typedef enum {
+  /**
+   * The memory copy kind is not known.
+   */
+  CUPTI_ACTIVITY_MEMCPY_KIND_UNKNOWN = 0,
+
+  /**
+   * A host to device memory copy.
+   */
+  CUPTI_ACTIVITY_MEMCPY_KIND_HTOD = 1,
+
+  /**
+   * A device to host memory copy.
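+   *
+   * (Editorial illustration, not part of the original header: memcpy
+   * records store this kind as a uint8_t, so decoding requires a cast;
+   * `memcpy5` is a hypothetical CUpti_ActivityMemcpy5 pointer.)
+   * \code
+   *   CUpti_ActivityMemcpyKind k =
+   *       (CUpti_ActivityMemcpyKind)memcpy5->copyKind;
+   * \endcode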
+ */ + CUPTI_ACTIVITY_MEMCPY_KIND_DTOH = 2, + + /** + * A host to device array memory copy. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_HTOA = 3, + + /** + * A device array to host memory copy. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_ATOH = 4, + + /** + * A device array to device array memory copy. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_ATOA = 5, + + /** + * A device array to device memory copy. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_ATOD = 6, + + /** + * A device to device array memory copy. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_DTOA = 7, + + /** + * A device to device memory copy on the same device. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_DTOD = 8, + + /** + * A host to host memory copy. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_HTOH = 9, + + /** + * A peer to peer memory copy across different devices. + */ + CUPTI_ACTIVITY_MEMCPY_KIND_PTOP = 10, + + CUPTI_ACTIVITY_MEMCPY_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityMemcpyKind; + +/** + * \brief The kinds of memory accessed by a memory operation/copy. + * + * Each kind represents the type of the memory + * accessed by a memory operation/copy. + */ +typedef enum { + /** + * The memory kind is unknown. + */ + CUPTI_ACTIVITY_MEMORY_KIND_UNKNOWN = 0, + + /** + * The memory is pageable. + */ + CUPTI_ACTIVITY_MEMORY_KIND_PAGEABLE = 1, + + /** + * The memory is pinned. + */ + CUPTI_ACTIVITY_MEMORY_KIND_PINNED = 2, + + /** + * The memory is on the device. + */ + CUPTI_ACTIVITY_MEMORY_KIND_DEVICE = 3, + + /** + * The memory is an array. + */ + CUPTI_ACTIVITY_MEMORY_KIND_ARRAY = 4, + + /** + * The memory is managed + */ + CUPTI_ACTIVITY_MEMORY_KIND_MANAGED = 5, + + /** + * The memory is device static + */ + CUPTI_ACTIVITY_MEMORY_KIND_DEVICE_STATIC = 6, + + /** + * The memory is managed static + */ + CUPTI_ACTIVITY_MEMORY_KIND_MANAGED_STATIC = 7, + + CUPTI_ACTIVITY_MEMORY_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityMemoryKind; + +/** + * \brief The kind of a preemption activity. + */ +typedef enum { + /** + * The preemption kind is not known. + */ + CUPTI_ACTIVITY_PREEMPTION_KIND_UNKNOWN = 0, + + /** + * Preemption to save CDP block. + */ + CUPTI_ACTIVITY_PREEMPTION_KIND_SAVE = 1, + + /** + * Preemption to restore CDP block. + */ + CUPTI_ACTIVITY_PREEMPTION_KIND_RESTORE = 2, + + CUPTI_ACTIVITY_PREEMPTION_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityPreemptionKind; + +/** + * \brief The kind of environment data. Used to indicate what type of + * data is being reported by an environment activity record. + */ +typedef enum { + /** + * Unknown data. + */ + CUPTI_ACTIVITY_ENVIRONMENT_UNKNOWN = 0, + + /** + * The environment data is related to speed. + */ + CUPTI_ACTIVITY_ENVIRONMENT_SPEED = 1, + + /** + * The environment data is related to temperature. + */ + CUPTI_ACTIVITY_ENVIRONMENT_TEMPERATURE = 2, + + /** + * The environment data is related to power. + */ + CUPTI_ACTIVITY_ENVIRONMENT_POWER = 3, + + /** + * The environment data is related to cooling. + */ + CUPTI_ACTIVITY_ENVIRONMENT_COOLING = 4, + + CUPTI_ACTIVITY_ENVIRONMENT_COUNT, + + CUPTI_ACTIVITY_ENVIRONMENT_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityEnvironmentKind; + +/** + * \brief Reasons for clock throttling. + * + * The possible reasons that a clock can be throttled. There can be + * more than one reason that a clock is being throttled so these types + * can be combined by bitwise OR. These are used in the + * clocksThrottleReason field in the Environment Activity Record. + */ +typedef enum { + /** + * Nothing is running on the GPU and the clocks are dropping to idle + * state. 
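+ *
+ * (Editorial illustration, not part of the original header: these
+ * reasons are bit flags that may be OR-ed together, so each is tested
+ * individually; `reasons` is a hypothetical value read from the
+ * environment record's clocksThrottleReason field.)
+ * \code
+ *   if (reasons & CUPTI_CLOCKS_THROTTLE_REASON_GPU_IDLE) {
+ *     // clocks are idling, not a performance limiter
+ *   }
+ * \endcode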
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_GPU_IDLE = 0x00000001,
+
+  /**
+   * The GPU clocks are limited by a user specified limit.
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_USER_DEFINED_CLOCKS = 0x00000002,
+
+  /**
+   * A software power scaling algorithm is reducing the clocks below
+   * requested clocks.
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_SW_POWER_CAP = 0x00000004,
+
+  /**
+   * Hardware slowdown to reduce the clock by a factor of two or more
+   * is engaged. This is an indicator of one of the following: 1)
+   * Temperature is too high, 2) External power brake assertion is
+   * being triggered (e.g. by the system power supply), 3) Change in
+   * power state.
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN = 0x00000008,
+
+  /**
+   * Some unspecified factor is reducing the clocks.
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_UNKNOWN = 0x80000000,
+
+  /**
+   * Throttle reason is not supported for this GPU.
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_UNSUPPORTED = 0x40000000,
+
+  /**
+   * No clock throttling.
+   */
+  CUPTI_CLOCKS_THROTTLE_REASON_NONE = 0x00000000,
+
+  CUPTI_CLOCKS_THROTTLE_REASON_FORCE_INT = 0x7fffffff
+} CUpti_EnvironmentClocksThrottleReason;
+
+/**
+ * \brief Scope of the unified memory counter (deprecated in CUDA 7.0)
+ */
+typedef enum {
+  /**
+   * The unified memory counter scope is not known.
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_UNKNOWN = 0,
+
+  /**
+   * Collect unified memory counter for single process on one device
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE = 1,
+
+  /**
+   * Collect unified memory counter for single process across all devices
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_ALL_DEVICES = 2,
+
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_COUNT,
+
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityUnifiedMemoryCounterScope;
+
+/**
+ * \brief Kind of the Unified Memory counter
+ *
+ * Many activities are associated with the Unified Memory mechanism; among
+ * them are transfers from host to device, transfers from device to host,
+ * and page faults on the host side.
+ */
+typedef enum {
+  /**
+   * The unified memory counter kind is not known.
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_UNKNOWN = 0,
+
+  /**
+   * Number of bytes transferred from host to device
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD = 1,
+
+  /**
+   * Number of bytes transferred from device to host
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH = 2,
+
+  /**
+   * Number of CPU page faults; this is only supported on 64 bit
+   * Linux and Mac platforms
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT = 3,
+
+  /**
+   * Number of GPU page faults; this is only supported on devices with
+   * compute capability 6.0 and higher and 64 bit Linux platforms
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT = 4,
+
+  /**
+   * Thrashing occurs when data is frequently accessed by
+   * multiple processors and has to be constantly migrated around
+   * to achieve data locality. In this case the overhead of migration
+   * may exceed the benefits of locality.
+   * This is only supported on 64 bit Linux platforms.
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING = 5,
+
+  /**
+   * Throttling is a prevention technique used by the driver to avoid
+   * further thrashing. Here, the driver doesn't service the fault for
+   * one of the contending processors for a specific period of time,
+   * so that the other processor can run at full-speed.
+   * This is only supported on 64 bit Linux platforms.
+ */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING = 6, + + /** + * In case throttling does not help, the driver tries to pin the memory + * to a processor for a specific period of time. One of the contending + * processors will have slow access to the memory, while the other will + * have fast access. + * This is only supported on 64 bit Linux platforms. + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP = 7, + + /** + * Number of bytes transferred from one device to another device. + * This is only supported on 64 bit Linux platforms. + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD = 8, + + CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_COUNT, + + CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityUnifiedMemoryCounterKind; + +/** + * \brief Memory access type for unified memory page faults + * + * This is valid for \ref CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT + * and \ref CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT + */ +typedef enum { + /** + * The unified memory access type is not known + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_UNKNOWN = 0, + + /** + * The page fault was triggered by read memory instruction + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_READ = 1, + + /** + * The page fault was triggered by write memory instruction + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_WRITE = 2, + + /** + * The page fault was triggered by atomic memory instruction + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_ATOMIC = 3, + + /** + * The page fault was triggered by memory prefetch operation + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_ACCESS_TYPE_PREFETCH = 4 +} CUpti_ActivityUnifiedMemoryAccessType; + +/** + * \brief Migration cause of the Unified Memory counter + * + * This is valid for \ref CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD and + * \ref CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH + */ +typedef enum { + /** + * The unified memory migration cause is not known + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_UNKNOWN = 0, + + /** + * The unified memory migrated due to an explicit call from + * the user e.g. cudaMemPrefetchAsync + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_USER = 1, + + /** + * The unified memory migrated to guarantee data coherence + * e.g. CPU/GPU faults on Pascal+ and kernel launch on pre-Pascal GPUs + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_COHERENCE = 2, + + /** + * The unified memory was speculatively migrated by the UVM driver + * before being accessed by the destination processor to improve + * performance + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_PREFETCH = 3, + + /** + * The unified memory migrated to the CPU because it was evicted to make + * room for another block of memory on the GPU + */ + CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_EVICTION = 4, + + /** + * The unified memory migrated to another processor because of access counter + * notifications. Only frequently accessed pages are migrated between CPU and GPU, or + * between peer GPUs. 
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_MIGRATION_CAUSE_ACCESS_COUNTERS = 5,
+} CUpti_ActivityUnifiedMemoryMigrationCause;
+
+/**
+ * \brief Remote memory map cause of the Unified Memory counter
+ *
+ * This is valid for \ref CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP
+ */
+typedef enum {
+  /**
+   * The cause of mapping to remote memory was unknown
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_UNKNOWN = 0,
+
+  /**
+   * Mapping to remote memory was added to maintain data coherence.
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_COHERENCE = 1,
+
+  /**
+   * Mapping to remote memory was added to prevent further thrashing
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_THRASHING = 2,
+
+  /**
+   * Mapping to remote memory was added to enforce the hints
+   * specified by the programmer or by performance heuristics of the
+   * UVM driver
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_POLICY = 3,
+
+  /**
+   * Mapping to remote memory was added because there is no more
+   * memory available on the processor and eviction was not
+   * possible
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_OUT_OF_MEMORY = 4,
+
+  /**
+   * Mapping to remote memory was added after the memory was
+   * evicted to make room for another block of memory on the GPU
+   */
+  CUPTI_ACTIVITY_UNIFIED_MEMORY_REMOTE_MAP_CAUSE_EVICTION = 5,
+} CUpti_ActivityUnifiedMemoryRemoteMapCause;
+
+/**
+ * \brief SASS instruction classification.
+ *
+ * SASS instructions are broadly divided into different classes. Each enum
+ * value represents one such classification.
+ */
+typedef enum {
+  /**
+   * The instruction class is not known.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_UNKNOWN = 0,
+
+  /**
+   * Represents a 32 bit floating point operation.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_32 = 1,
+
+  /**
+   * Represents a 64 bit floating point operation.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_64 = 2,
+
+  /**
+   * Represents an integer operation.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_INTEGER = 3,
+
+  /**
+   * Represents a bit conversion operation.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_BIT_CONVERSION = 4,
+
+  /**
+   * Represents a control flow instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_CONTROL_FLOW = 5,
+
+  /**
+   * Represents a global load-store instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_GLOBAL = 6,
+
+  /**
+   * Represents a shared load-store instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_SHARED = 7,
+
+  /**
+   * Represents a local load-store instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_LOCAL = 8,
+
+  /**
+   * Represents a generic load-store instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_GENERIC = 9,
+
+  /**
+   * Represents a surface load-store instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_SURFACE = 10,
+
+  /**
+   * Represents a constant load instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_CONSTANT = 11,
+
+  /**
+   * Represents a texture load-store instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_TEXTURE = 12,
+
+  /**
+   * Represents a global atomic instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_GLOBAL_ATOMIC = 13,
+
+  /**
+   * Represents a shared atomic instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_SHARED_ATOMIC = 14,
+
+  /**
+   * Represents a surface atomic instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_SURFACE_ATOMIC = 15,
+
+  /**
+   * Represents an inter-thread communication instruction.
+   */
+  CUPTI_ACTIVITY_INSTRUCTION_CLASS_INTER_THREAD_COMMUNICATION = 16,
+
+  /**
+   * Represents a barrier instruction.
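+   *
+   * (Editorial illustration, not part of the original header: for
+   * instruction records the class is packed into the flags field behind
+   * CUPTI_ACTIVITY_FLAG_INSTRUCTION_CLASS_MASK (0xFF << 1), so it is
+   * recovered with a mask and a one-bit shift; `inst` is a hypothetical
+   * CUpti_ActivityInstructionExecution pointer.)
+   * \code
+   *   CUpti_ActivityInstructionClass cls = (CUpti_ActivityInstructionClass)
+   *       ((inst->flags & CUPTI_ACTIVITY_FLAG_INSTRUCTION_CLASS_MASK) >> 1);
+   * \endcode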
+ */ + CUPTI_ACTIVITY_INSTRUCTION_CLASS_BARRIER = 17, + + /** + * Represents some miscellaneous instructions which do not fit in the above classification. + */ + CUPTI_ACTIVITY_INSTRUCTION_CLASS_MISCELLANEOUS = 18, + + /** + * Represents a 16 bit floating point operation. + */ + CUPTI_ACTIVITY_INSTRUCTION_CLASS_FP_16 = 19, + + /** + * Represents uniform instruction. + */ + CUPTI_ACTIVITY_INSTRUCTION_CLASS_UNIFORM = 20, + + CUPTI_ACTIVITY_INSTRUCTION_CLASS_KIND_FORCE_INT = 0x7fffffff +} CUpti_ActivityInstructionClass; + +/** + * \brief Partitioned global caching option + */ +typedef enum { + /** + * Partitioned global cache config unknown. + */ + CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_UNKNOWN = 0, + + /** + * Partitioned global cache not supported. + */ + CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_NOT_SUPPORTED = 1, + + /** + * Partitioned global cache config off. + */ + CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_OFF = 2, + + /** + * Partitioned global cache config on. + */ + CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_ON = 3, + + CUPTI_ACTIVITY_PARTITIONED_GLOBAL_CACHE_CONFIG_FORCE_INT = 0x7fffffff +} CUpti_ActivityPartitionedGlobalCacheConfig; + +/** + * \brief Synchronization type. + * + * The types of synchronization to be used with CUpti_ActivitySynchronization. + */ + +typedef enum { + /** + * Unknown data. + */ + CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_UNKNOWN = 0, + + /** + * Event synchronize API. + */ + CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_EVENT_SYNCHRONIZE = 1, + + /** + * Stream wait event API. + */ + CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_WAIT_EVENT = 2, + + /** + * Stream synchronize API. + */ + CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_SYNCHRONIZE = 3, + + /** + * Context synchronize API. + */ + CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_CONTEXT_SYNCHRONIZE = 4, + + CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_FORCE_INT = 0x7fffffff +} CUpti_ActivitySynchronizationType; + +/** + * \brief stream type. + * + * The types of stream to be used with CUpti_ActivityStream. + */ + +typedef enum { + /** + * Unknown data. + */ + CUPTI_ACTIVITY_STREAM_CREATE_FLAG_UNKNOWN = 0, + + /** + * Default stream. + */ + CUPTI_ACTIVITY_STREAM_CREATE_FLAG_DEFAULT = 1, + + /** + * Non-blocking stream. + */ + CUPTI_ACTIVITY_STREAM_CREATE_FLAG_NON_BLOCKING = 2, + + /** + * Null stream. + */ + CUPTI_ACTIVITY_STREAM_CREATE_FLAG_NULL = 3, + + /** + * Stream create Mask + */ + CUPTI_ACTIVITY_STREAM_CREATE_MASK = 0xFFFF, + + CUPTI_ACTIVITY_STREAM_CREATE_FLAG_FORCE_INT = 0x7fffffff +} CUpti_ActivityStreamFlag; + +/** +* \brief Link flags. +* +* Describes link properties, to be used with CUpti_ActivityNvLink. +*/ + +typedef enum { + /** + * The flag is invalid. + */ + CUPTI_LINK_FLAG_INVALID = 0, + + /** + * Is peer to peer access supported by this link. + */ + CUPTI_LINK_FLAG_PEER_ACCESS = (1 << 1), + + /** + * Is system memory access supported by this link. + */ + CUPTI_LINK_FLAG_SYSMEM_ACCESS = (1 << 2), + + /** + * Is peer atomic access supported by this link. + */ + CUPTI_LINK_FLAG_PEER_ATOMICS = (1 << 3), + + /** + * Is system memory atomic access supported by this link. + */ + CUPTI_LINK_FLAG_SYSMEM_ATOMICS = (1 << 4), + + CUPTI_LINK_FLAG_FORCE_INT = 0x7fffffff +} CUpti_LinkFlag; + +/** +* \brief Memory operation types. +* +* Describes the type of memory operation, to be used with CUpti_ActivityMemory3. +*/ + +typedef enum { + /** + * The operation is invalid. + */ + CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_INVALID = 0, + + /** + * Memory is allocated. 
+   */
+  CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_ALLOCATION = 1,
+
+  /**
+   * Memory is released.
+   */
+  CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_RELEASE = 2,
+
+  CUPTI_ACTIVITY_MEMORY_OPERATION_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityMemoryOperationType;
+
+/**
+* \brief Memory pool types.
+*
+* Describes the type of memory pool, to be used with CUpti_ActivityMemory3.
+*/
+
+typedef enum {
+  /**
+   * The memory pool type is invalid.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_TYPE_INVALID = 0,
+
+  /**
+   * Memory pool is local to the process.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL = 1,
+
+  /**
+   * Memory pool is imported by the process.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_TYPE_IMPORTED = 2,
+
+  CUPTI_ACTIVITY_MEMORY_POOL_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityMemoryPoolType;
+
+/**
+* \brief Memory pool operation types.
+*
+* Describes the type of memory pool operation, to be used with CUpti_ActivityMemoryPool2.
+*/
+
+typedef enum {
+  /**
+   * The operation is invalid.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_INVALID = 0,
+
+  /**
+   * Memory pool is created.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_CREATED = 1,
+
+  /**
+   * Memory pool is destroyed.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_DESTROYED = 2,
+
+  /**
+   * Memory pool is trimmed.
+   */
+  CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_TRIMMED = 3,
+
+  CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityMemoryPoolOperationType;
+
+typedef enum {
+  CUPTI_CHANNEL_TYPE_INVALID = 0,
+
+  CUPTI_CHANNEL_TYPE_COMPUTE = 1,
+
+  CUPTI_CHANNEL_TYPE_ASYNC_MEMCPY = 2
+} CUpti_ChannelType;
+
+/**
+ * The source-locator ID that indicates an unknown source
+ * location. There is not an actual CUpti_ActivitySourceLocator object
+ * corresponding to this value.
+ */
+#define CUPTI_SOURCE_LOCATOR_ID_UNKNOWN 0
+
+/**
+ * An invalid function index ID.
+ */
+#define CUPTI_FUNCTION_INDEX_ID_INVALID 0
+
+/**
+ * An invalid/unknown correlation ID. A correlation ID of this value
+ * indicates that there is no correlation for the activity record.
+ */
+#define CUPTI_CORRELATION_ID_UNKNOWN 0
+
+/**
+ * An invalid/unknown grid ID.
+ */
+#define CUPTI_GRID_ID_UNKNOWN 0LL
+
+/**
+ * An invalid/unknown timestamp for a start, end, queued, submitted,
+ * or completed time.
+ */
+#define CUPTI_TIMESTAMP_UNKNOWN 0LL
+
+/**
+ * An invalid/unknown value.
+ */
+#define CUPTI_SYNCHRONIZATION_INVALID_VALUE -1
+
+/**
+ * An invalid/unknown process id.
+ */
+#define CUPTI_AUTO_BOOST_INVALID_CLIENT_PID 0
+
+/**
+ * Invalid/unknown NVLink port number.
+*/
+#define CUPTI_NVLINK_INVALID_PORT -1
+
+/**
+ * Maximum NVLink port numbers.
+*/
+#define CUPTI_MAX_NVLINK_PORTS 32
+
+START_PACKED_ALIGNMENT
+/**
+ * \brief Unified Memory counters configuration structure
+ *
+ * This structure controls the enable/disable of the various
+ * Unified Memory counters consisting of scope, kind and other parameters.
+ * See function \ref cuptiActivityConfigureUnifiedMemoryCounter
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * Unified Memory counter scope. (deprecated in CUDA 7.0)
+   */
+  CUpti_ActivityUnifiedMemoryCounterScope scope;
+
+  /**
+   * Unified Memory counter kind
+   */
+  CUpti_ActivityUnifiedMemoryCounterKind kind;
+
+  /**
+   * Device id of the target device. This is relevant only
+   * for single device scopes. (deprecated in CUDA 7.0)
+   */
+  uint32_t deviceId;
+
+  /**
+   * Control to enable/disable the counter. To enable the counter,
+   * set it to a non-zero value; zero disables it.
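+   *
+   * (Editorial sketch, not part of the original header: a minimal
+   * configuration enabling host-to-device byte counting, error checking
+   * omitted.)
+   * \code
+   *   CUpti_ActivityUnifiedMemoryCounterConfig cfg = {0};
+   *   cfg.kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD;
+   *   cfg.enable = 1;
+   *   cuptiActivityConfigureUnifiedMemoryCounter(&cfg, 1);
+   * \endcode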
+   */
+  uint32_t enable;
+} CUpti_ActivityUnifiedMemoryCounterConfig;
+
+/**
+ * \brief Device auto boost state structure
+ *
+ * This structure defines auto boost state for a device.
+ * See function \ref cuptiGetAutoBoostState
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * Returned auto boost state. 1 is returned in case auto boost is enabled, 0
+   * otherwise
+   */
+  uint32_t enabled;
+
+  /**
+   * Id of the process that has set the current boost state. The value will be
+   * CUPTI_AUTO_BOOST_INVALID_CLIENT_PID if the user does not have the
+   * permission to query process ids or there is an error in querying the
+   * process id.
+   */
+  uint32_t pid;
+
+} CUpti_ActivityAutoBoostState;
+
+/**
+ * \brief PC sampling configuration structure
+ *
+ * This structure defines the PC sampling configuration.
+ *
+ * See function \ref cuptiActivityConfigurePCSampling
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * Size of the configuration structure.
+   * The CUPTI client should set the size of the structure. It will be used in
+   * CUPTI to check which fields are available in the structure. Used to
+   * preserve backward compatibility.
+   */
+  uint32_t size;
+
+  /**
+   * There are 5 levels provided for the sampling period. Each level
+   * internally maps to a period in terms of cycles. The same level can
+   * map to a different number of cycles on different GPUs. The number of
+   * cycles will be chosen to minimize information loss. The period
+   * chosen will be given by samplingPeriodInCycles in
+   * \ref CUpti_ActivityPCSamplingRecordInfo for each kernel instance.
+   */
+  CUpti_ActivityPCSamplingPeriod samplingPeriod;
+
+  /**
+   * This will override the period set by samplingPeriod. A value of 0 in
+   * samplingPeriod2 means that samplingPeriod2 should not be used and
+   * samplingPeriod should be used instead.
+   * Valid values for samplingPeriod2 are between 5 and 31, both inclusive.
+   * This will set the sampling period to (2^samplingPeriod2) cycles.
+   */
+  uint32_t samplingPeriod2;
+} CUpti_ActivityPCSamplingConfig;
+
+/**
+ * \brief The base activity record.
+ *
+ * The activity API uses a CUpti_Activity as a generic representation
+ * for any activity. The 'kind' field is used to determine the
+ * specific activity kind, and from that the CUpti_Activity object can
+ * be cast to the specific activity record type appropriate for that kind.
+ *
+ * Note that all activity record types are padded and aligned to
+ * ensure that each member of the record is naturally aligned.
+ *
+ * \see CUpti_ActivityKind
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The kind of this activity.
+   */
+  CUpti_ActivityKind kind;
+} CUpti_Activity;
+
+/**
+ * \brief The activity record for memory copies. (deprecated)
+ *
+ * This activity record represents a memory copy
+ * (CUPTI_ACTIVITY_KIND_MEMCPY).
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The kind of the memory copy, stored as a byte to reduce record
+   * size. \see CUpti_ActivityMemcpyKind
+   */
+  uint8_t copyKind;
+
+  /**
+   * The source memory kind read by the memory copy, stored as a byte
+   * to reduce record size. \see CUpti_ActivityMemoryKind
+   */
+  uint8_t srcKind;
+
+  /**
+   * The destination memory kind read by the memory copy, stored as a
+   * byte to reduce record size. \see CUpti_ActivityMemoryKind
+   */
+  uint8_t dstKind;
+
+  /**
+   * The flags associated with the memory copy. \see CUpti_ActivityFlag
+   */
+  uint8_t flags;
+
+  /**
+   * The number of bytes transferred by the memory copy.
+ */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver API activity record that launched + * the memory copy. + */ + uint32_t correlationId; + + /** + * The runtime correlation ID of the memory copy. Each memory copy + * is assigned a unique runtime correlation ID that is identical to + * the correlation ID in the runtime API activity record that + * launched the memory copy. + */ + uint32_t runtimeCorrelationId; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; +} CUpti_ActivityMemcpy; + +/** + * \brief The activity record for memory copies. (deprecated in CUDA 11.1) + * + * This activity record represents a memory copy + * (CUPTI_ACTIVITY_KIND_MEMCPY). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY. + */ + CUpti_ActivityKind kind; + + /** + * The kind of the memory copy, stored as a byte to reduce record + * size. \see CUpti_ActivityMemcpyKind + */ + uint8_t copyKind; + + /** + * The source memory kind read by the memory copy, stored as a byte + * to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t srcKind; + + /** + * The destination memory kind read by the memory copy, stored as a + * byte to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t dstKind; + + /** + * The flags associated with the memory copy. \see CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * The number of bytes transferred by the memory copy. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver API activity record that launched + * the memory copy. + */ + uint32_t correlationId; + + /** + * The runtime correlation ID of the memory copy. 
Each memory copy + * is assigned a unique runtime correlation ID that is identical to + * the correlation ID in the runtime API activity record that + * launched the memory copy. + */ + uint32_t runtimeCorrelationId; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed this memcpy through graph launch. + * This field will be 0 if the memcpy is not done through graph launch. + */ + uint64_t graphNodeId; +} CUpti_ActivityMemcpy3; + +/** + * \brief The activity record for memory copies. (deprecated in CUDA 11.6) + * + * This activity record represents a memory copy + * (CUPTI_ACTIVITY_KIND_MEMCPY). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY. + */ + CUpti_ActivityKind kind; + + /** + * The kind of the memory copy, stored as a byte to reduce record + * size. \see CUpti_ActivityMemcpyKind + */ + uint8_t copyKind; + + /** + * The source memory kind read by the memory copy, stored as a byte + * to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t srcKind; + + /** + * The destination memory kind read by the memory copy, stored as a + * byte to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t dstKind; + + /** + * The flags associated with the memory copy. \see CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * The number of bytes transferred by the memory copy. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver API activity record that launched + * the memory copy. + */ + uint32_t correlationId; + + /** + * The runtime correlation ID of the memory copy. Each memory copy + * is assigned a unique runtime correlation ID that is identical to + * the correlation ID in the runtime API activity record that + * launched the memory copy. + */ + uint32_t runtimeCorrelationId; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed this memcpy through graph launch. + * This field will be 0 if the memcpy is not done through graph launch. + */ + uint64_t graphNodeId; + + /** + * The unique ID of the graph that executed this memcpy through graph launch. + * This field will be 0 if the memcpy is not done through graph launch. + */ + uint32_t graphId; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t padding; +} CUpti_ActivityMemcpy4; + +/** + * \brief The activity record for memory copies. 
+ *
+ * This activity record represents a memory copy
+ * (CUPTI_ACTIVITY_KIND_MEMCPY).
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The kind of the memory copy, stored as a byte to reduce record
+   * size. \see CUpti_ActivityMemcpyKind
+   */
+  uint8_t copyKind;
+
+  /**
+   * The source memory kind read by the memory copy, stored as a byte
+   * to reduce record size. \see CUpti_ActivityMemoryKind
+   */
+  uint8_t srcKind;
+
+  /**
+   * The destination memory kind written by the memory copy, stored as a
+   * byte to reduce record size. \see CUpti_ActivityMemoryKind
+   */
+  uint8_t dstKind;
+
+  /**
+   * The flags associated with the memory copy. \see CUpti_ActivityFlag
+   */
+  uint8_t flags;
+
+  /**
+   * The number of bytes transferred by the memory copy.
+   */
+  uint64_t bytes;
+
+  /**
+   * The start timestamp for the memory copy, in ns. A value of 0 for
+   * both the start and end timestamps indicates that timestamp
+   * information could not be collected for the memory copy.
+   */
+  uint64_t start;
+
+  /**
+   * The end timestamp for the memory copy, in ns. A value of 0 for
+   * both the start and end timestamps indicates that timestamp
+   * information could not be collected for the memory copy.
+   */
+  uint64_t end;
+
+  /**
+   * The ID of the device where the memory copy is occurring.
+   */
+  uint32_t deviceId;
+
+  /**
+   * The ID of the context where the memory copy is occurring.
+   */
+  uint32_t contextId;
+
+  /**
+   * The ID of the stream where the memory copy is occurring.
+   */
+  uint32_t streamId;
+
+  /**
+   * The correlation ID of the memory copy. Each memory copy is
+   * assigned a unique correlation ID that is identical to the
+   * correlation ID in the driver API activity record that launched
+   * the memory copy.
+   */
+  uint32_t correlationId;
+
+  /**
+   * The runtime correlation ID of the memory copy. Each memory copy
+   * is assigned a unique runtime correlation ID that is identical to
+   * the correlation ID in the runtime API activity record that
+   * launched the memory copy.
+   */
+  uint32_t runtimeCorrelationId;
+
+#ifdef CUPTILP64
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint32_t pad;
+#endif
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  void *reserved0;
+
+  /**
+   * The unique ID of the graph node that executed this memcpy through graph launch.
+   * This field will be 0 if the memcpy is not done through graph launch.
+   */
+  uint64_t graphNodeId;
+
+  /**
+   * The unique ID of the graph that executed this memcpy through graph launch.
+   * This field will be 0 if the memcpy is not done through graph launch.
+   */
+  uint32_t graphId;
+
+  /**
+   * The ID of the HW channel on which the memory copy is occurring.
+   */
+  uint32_t channelID;
+
+  /**
+   * The type of the channel.
+   */
+  CUpti_ChannelType channelType;
+
+  /**
+   * Reserved for internal use.
+   */
+  uint32_t pad2;
+} CUpti_ActivityMemcpy5;
+
+/**
+ * \brief The activity record for peer-to-peer memory copies.
+ *
+ * This activity record represents a peer-to-peer memory copy
+ * (CUPTI_ACTIVITY_KIND_MEMCPY2) but is no longer generated
+ * by CUPTI. Peer-to-peer memory copy activities are now reported using the
+ * CUpti_ActivityMemcpyPtoP2 activity record.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY2.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The kind of the memory copy, stored as a byte to reduce record
+   * size.
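+   *
+   * Editor's sketch: the byte can be widened back to the enum for checks,
+   * with p a hypothetical pointer to any of the memcpy record flavors:
+   * \code
+   * CUpti_ActivityMemcpyKind ck = (CUpti_ActivityMemcpyKind)p->copyKind;
+   * if (ck == CUPTI_ACTIVITY_MEMCPY_KIND_PTOP) {
+   *   // a peer-to-peer copy
+   * }
+   * \endcode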
\see CUpti_ActivityMemcpyKind + */ + uint8_t copyKind; + + /** + * The source memory kind read by the memory copy, stored as a byte + * to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t srcKind; + + /** + * The destination memory kind read by the memory copy, stored as a + * byte to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t dstKind; + + /** + * The flags associated with the memory copy. \see + * CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * The number of bytes transferred by the memory copy. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The ID of the device where memory is being copied from. + */ + uint32_t srcDeviceId; + + /** + * The ID of the context owning the memory being copied from. + */ + uint32_t srcContextId; + + /** + * The ID of the device where memory is being copied to. + */ + uint32_t dstDeviceId; + + /** + * The ID of the context owning the memory being copied to. + */ + uint32_t dstContextId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver and runtime API activity record that + * launched the memory copy. + */ + uint32_t correlationId; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; +} CUpti_ActivityMemcpyPtoP; + +typedef CUpti_ActivityMemcpyPtoP CUpti_ActivityMemcpy2; + +/** + * \brief The activity record for peer-to-peer memory copies. + * (deprecated in CUDA 11.1) + * + * This activity record represents a peer-to-peer memory copy + * (CUPTI_ACTIVITY_KIND_MEMCPY2). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY2. + */ + CUpti_ActivityKind kind; + + /** + * The kind of the memory copy, stored as a byte to reduce record + * size. \see CUpti_ActivityMemcpyKind + */ + uint8_t copyKind; + + /** + * The source memory kind read by the memory copy, stored as a byte + * to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t srcKind; + + /** + * The destination memory kind read by the memory copy, stored as a + * byte to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t dstKind; + + /** + * The flags associated with the memory copy. \see + * CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * The number of bytes transferred by the memory copy. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. 
A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The ID of the device where memory is being copied from. + */ + uint32_t srcDeviceId; + + /** + * The ID of the context owning the memory being copied from. + */ + uint32_t srcContextId; + + /** + * The ID of the device where memory is being copied to. + */ + uint32_t dstDeviceId; + + /** + * The ID of the context owning the memory being copied to. + */ + uint32_t dstContextId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver and runtime API activity record that + * launched the memory copy. + */ + uint32_t correlationId; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed the memcpy through graph launch. + * This field will be 0 if memcpy is not done using graph launch. + */ + uint64_t graphNodeId; +} CUpti_ActivityMemcpyPtoP2; + +/** + * \brief The activity record for peer-to-peer memory copies. + * (deprecated in CUDA 11.6) + * + * This activity record represents a peer-to-peer memory copy + * (CUPTI_ACTIVITY_KIND_MEMCPY2). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY2. + */ + CUpti_ActivityKind kind; + + /** + * The kind of the memory copy, stored as a byte to reduce record + * size. \see CUpti_ActivityMemcpyKind + */ + uint8_t copyKind; + + /** + * The source memory kind read by the memory copy, stored as a byte + * to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t srcKind; + + /** + * The destination memory kind read by the memory copy, stored as a + * byte to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t dstKind; + + /** + * The flags associated with the memory copy. \see + * CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * The number of bytes transferred by the memory copy. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The ID of the device where memory is being copied from. + */ + uint32_t srcDeviceId; + + /** + * The ID of the context owning the memory being copied from. + */ + uint32_t srcContextId; + + /** + * The ID of the device where memory is being copied to. 
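+ *
+ * Editor's sketch: effective peer-to-peer throughput for one record, with p a
+ * hypothetical CUpti_ActivityMemcpyPtoP3 pointer (guarding against records
+ * whose timestamps could not be collected):
+ * \code
+ * if (p->end > p->start) {
+ *   // bytes per ns equals GB/s
+ *   double gbps = (double)p->bytes / (double)(p->end - p->start);
+ *   printf("P2P dev%u -> dev%u: %.2f GB/s\n",
+ *          p->srcDeviceId, p->dstDeviceId, gbps);
+ * }
+ * \endcode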
+ */ + uint32_t dstDeviceId; + + /** + * The ID of the context owning the memory being copied to. + */ + uint32_t dstContextId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver and runtime API activity record that + * launched the memory copy. + */ + uint32_t correlationId; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed the memcpy through graph launch. + * This field will be 0 if memcpy is not done using graph launch. + */ + uint64_t graphNodeId; + + /** + * The unique ID of the graph that executed this memcpy through graph launch. + * This field will be 0 if the memcpy is not done through graph launch. + */ + uint32_t graphId; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t padding; +} CUpti_ActivityMemcpyPtoP3; + +/** + * \brief The activity record for peer-to-peer memory copies. + * + * This activity record represents a peer-to-peer memory copy + * (CUPTI_ACTIVITY_KIND_MEMCPY2). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMCPY2. + */ + CUpti_ActivityKind kind; + + /** + * The kind of the memory copy, stored as a byte to reduce record + * size. \see CUpti_ActivityMemcpyKind + */ + uint8_t copyKind; + + /** + * The source memory kind read by the memory copy, stored as a byte + * to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t srcKind; + + /** + * The destination memory kind read by the memory copy, stored as a + * byte to reduce record size. \see CUpti_ActivityMemoryKind + */ + uint8_t dstKind; + + /** + * The flags associated with the memory copy. \see + * CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * The number of bytes transferred by the memory copy. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t start; + + /** + * The end timestamp for the memory copy, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory copy. + */ + uint64_t end; + + /** + * The ID of the device where the memory copy is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory copy is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory copy is occurring. + */ + uint32_t streamId; + + /** + * The ID of the device where memory is being copied from. + */ + uint32_t srcDeviceId; + + /** + * The ID of the context owning the memory being copied from. + */ + uint32_t srcContextId; + + /** + * The ID of the device where memory is being copied to. + */ + uint32_t dstDeviceId; + + /** + * The ID of the context owning the memory being copied to. + */ + uint32_t dstContextId; + + /** + * The correlation ID of the memory copy. Each memory copy is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver and runtime API activity record that + * launched the memory copy. + */ + uint32_t correlationId; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. 
+ */ + void *reserved0; + + /** + * The unique ID of the graph node that executed the memcpy through graph launch. + * This field will be 0 if memcpy is not done using graph launch. + */ + uint64_t graphNodeId; + + /** + * The unique ID of the graph that executed this memcpy through graph launch. + * This field will be 0 if the memcpy is not done through graph launch. + */ + uint32_t graphId; + + /** + * The ID of the HW channel on which the memory copy is occuring. + */ + uint32_t channelID; + + /** + * The type of the channel + */ + CUpti_ChannelType channelType; +} CUpti_ActivityMemcpyPtoP4; + +/** + * \brief The activity record for memset. (deprecated) + * + * This activity record represents a memory set operation + * (CUPTI_ACTIVITY_KIND_MEMSET). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMSET. + */ + CUpti_ActivityKind kind; + + /** + * The value being assigned to memory by the memory set. + */ + uint32_t value; + + /** + * The number of bytes being set by the memory set. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t start; + + /** + * The end timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t end; + + /** + * The ID of the device where the memory set is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory set is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory set is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory set. Each memory set is assigned + * a unique correlation ID that is identical to the correlation ID + * in the driver API activity record that launched the memory set. + */ + uint32_t correlationId; + + /** + * The flags associated with the memset. \see CUpti_ActivityFlag + */ + uint16_t flags; + + /** + * The memory kind of the memory set \see CUpti_ActivityMemoryKind + */ + uint16_t memoryKind; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; +} CUpti_ActivityMemset; + +/** + * \brief The activity record for memset. (deprecated in CUDA 11.1) + * + * This activity record represents a memory set operation + * (CUPTI_ACTIVITY_KIND_MEMSET). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMSET. + */ + CUpti_ActivityKind kind; + + /** + * The value being assigned to memory by the memory set. + */ + uint32_t value; + + /** + * The number of bytes being set by the memory set. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t start; + + /** + * The end timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t end; + + /** + * The ID of the device where the memory set is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory set is occurring. 
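+ *
+ * Editor's sketch: memset records are emitted once the kind is enabled, e.g.
+ * \code
+ * cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMSET);
+ * // later, for a record r of that kind:
+ * CUpti_ActivityMemset *ms = (CUpti_ActivityMemset *)r;
+ * printf("memset value=0x%x over %llu bytes on stream %u\n",
+ *        ms->value, (unsigned long long)ms->bytes, ms->streamId);
+ * \endcode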
+ */ + uint32_t contextId; + + /** + * The ID of the stream where the memory set is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory set. Each memory set is assigned + * a unique correlation ID that is identical to the correlation ID + * in the driver API activity record that launched the memory set. + */ + uint32_t correlationId; + + /** + * The flags associated with the memset. \see CUpti_ActivityFlag + */ + uint16_t flags; + + /** + * The memory kind of the memory set \see CUpti_ActivityMemoryKind + */ + uint16_t memoryKind; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed this memset through graph launch. + * This field will be 0 if the memset is not executed through graph launch. + */ + uint64_t graphNodeId; +} CUpti_ActivityMemset2; + +/** + * \brief The activity record for memset. (deprecated in CUDA 11.6) + * + * This activity record represents a memory set operation + * (CUPTI_ACTIVITY_KIND_MEMSET). + */ + +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMSET. + */ + CUpti_ActivityKind kind; + + /** + * The value being assigned to memory by the memory set. + */ + uint32_t value; + + /** + * The number of bytes being set by the memory set. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t start; + + /** + * The end timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t end; + + /** + * The ID of the device where the memory set is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory set is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory set is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory set. Each memory set is assigned + * a unique correlation ID that is identical to the correlation ID + * in the driver API activity record that launched the memory set. + */ + uint32_t correlationId; + + /** + * The flags associated with the memset. \see CUpti_ActivityFlag + */ + uint16_t flags; + + /** + * The memory kind of the memory set \see CUpti_ActivityMemoryKind + */ + uint16_t memoryKind; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed this memset through graph launch. + * This field will be 0 if the memset is not executed through graph launch. + */ + uint64_t graphNodeId; + + /** + * The unique ID of the graph that executed this memset through graph launch. + * This field will be 0 if the memset is not executed through graph launch. + */ + uint32_t graphId; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t padding; +} CUpti_ActivityMemset3; + +/** + * \brief The activity record for memset. + * + * This activity record represents a memory set operation + * (CUPTI_ACTIVITY_KIND_MEMSET). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMSET. 
+ */ + CUpti_ActivityKind kind; + + /** + * The value being assigned to memory by the memory set. + */ + uint32_t value; + + /** + * The number of bytes being set by the memory set. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t start; + + /** + * The end timestamp for the memory set, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the memory set. + */ + uint64_t end; + + /** + * The ID of the device where the memory set is occurring. + */ + uint32_t deviceId; + + /** + * The ID of the context where the memory set is occurring. + */ + uint32_t contextId; + + /** + * The ID of the stream where the memory set is occurring. + */ + uint32_t streamId; + + /** + * The correlation ID of the memory set. Each memory set is assigned + * a unique correlation ID that is identical to the correlation ID + * in the driver API activity record that launched the memory set. + */ + uint32_t correlationId; + + /** + * The flags associated with the memset. \see CUpti_ActivityFlag + */ + uint16_t flags; + + /** + * The memory kind of the memory set \see CUpti_ActivityMemoryKind + */ + uint16_t memoryKind; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The unique ID of the graph node that executed this memset through graph launch. + * This field will be 0 if the memset is not executed through graph launch. + */ + uint64_t graphNodeId; + + /** + * The unique ID of the graph that executed this memset through graph launch. + * This field will be 0 if the memset is not executed through graph launch. + */ + uint32_t graphId; + + /** + * The ID of the HW channel on which the memory set is occuring. + */ + uint32_t channelID; + + /** + * The type of the channel + */ + CUpti_ChannelType channelType; + + /** + * Undefined. Reserved for internal use + */ + uint32_t pad2; +} CUpti_ActivityMemset4; + +/** + * \brief The activity record for memory. + * + * This activity record represents a memory allocation and free operation + * (CUPTI_ACTIVITY_KIND_MEMORY). + * This activity record provides a single record for the memory + * allocation and memory release operations. + * + * Note: It is recommended to move to the new activity record \ref CUpti_ActivityMemory3 + * enabled using the kind \ref CUPTI_ACTIVITY_KIND_MEMORY2. + * \ref CUpti_ActivityMemory3 provides separate records for memory + * allocation and memory release operations. This allows to correlate the + * corresponding driver and runtime API activity record with the memory operation. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMORY + */ + CUpti_ActivityKind kind; + + /** + * The memory kind requested by the user + */ + CUpti_ActivityMemoryKind memoryKind; + + /** + * The virtual address of the allocation + */ + uint64_t address; + + /** + * The number of bytes of memory allocated. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory operation, i.e. + * the time when memory was allocated, in ns. + */ + uint64_t start; + + /** + * The end timestamp for the memory operation, i.e. + * the time when memory was freed, in ns. 
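+ *
+ * Editor's sketch of deriving an allocation's lifetime from this record,
+ * with mem a hypothetical CUpti_ActivityMemory pointer:
+ * \code
+ * if (mem->end != 0)
+ *   printf("alloc at 0x%llx lived %llu ns\n",
+ *          (unsigned long long)mem->address,
+ *          (unsigned long long)(mem->end - mem->start));
+ * else
+ *   printf("alloc at 0x%llx was never freed\n",
+ *          (unsigned long long)mem->address);
+ * \endcode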
+ * This will be 0 if memory is not freed in the application + */ + uint64_t end; + + /** + * The program counter of the allocation of memory + */ + uint64_t allocPC; + + /** + * The program counter of the freeing of memory. This will + * be 0 if memory is not freed in the application + */ + uint64_t freePC; + + /** + * The ID of the process to which this record belongs to. + */ + uint32_t processId; + + /** + * The ID of the device where the memory allocation is taking place. + */ + uint32_t deviceId; + + /** + * The ID of the context. If context is NULL, \p contextId is set to CUPTI_INVALID_CONTEXT_ID. + */ + uint32_t contextId; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * Variable name. This name is shared across all activity + * records representing the same symbol, and so should not be + * modified. + */ + const char* name; +} CUpti_ActivityMemory; + +/** + * \brief The activity record for memory. + * + * This activity record represents a memory allocation and free operation + * (CUPTI_ACTIVITY_KIND_MEMORY2). + * This activity record provides separate records for memory allocation and + * memory release operations. + * This allows to correlate the corresponding driver and runtime API + * activity record with the memory operation. + * + * Note: This activity record is an upgrade over \ref CUpti_ActivityMemory + * enabled using the kind \ref CUPTI_ACTIVITY_KIND_MEMORY. + * \ref CUpti_ActivityMemory provides a single record for the memory + * allocation and memory release operations. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMORY2 + */ + CUpti_ActivityKind kind; + + /** + * The memory operation requested by the user, \ref CUpti_ActivityMemoryOperationType. + */ + CUpti_ActivityMemoryOperationType memoryOperationType; + + /** + * The memory kind requested by the user, \ref CUpti_ActivityMemoryKind. + */ + CUpti_ActivityMemoryKind memoryKind; + + /** + * The correlation ID of the memory operation. Each memory operation is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver and runtime API activity record that + * launched the memory operation. + */ + uint32_t correlationId; + + /** + * The virtual address of the allocation. + */ + uint64_t address; + + /** + * The number of bytes of memory allocated. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory operation, in ns. + */ + uint64_t timestamp; + + /** + * The program counter of the memory operation. + */ + uint64_t PC; + + /** + * The ID of the process to which this record belongs to. + */ + uint32_t processId; + + /** + * The ID of the device where the memory operation is taking place. + */ + uint32_t deviceId; + + /** + * The ID of the context. If context is NULL, \p contextId is set to CUPTI_INVALID_CONTEXT_ID. + */ + uint32_t contextId; + + /** + * The ID of the stream. If memory operation is not async, \p streamId is set to CUPTI_INVALID_STREAM_ID. + */ + uint32_t streamId; + + /** + * Variable name. This name is shared across all activity + * records representing the same symbol, and so should not be + * modified. + */ + const char* name; + + /** + * \p isAsync is set if memory operation happens through async memory APIs. + */ + uint32_t isAsync; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad1; +#endif + + /** + * The memory pool configuration used for the memory operations. 
+ */ + struct { + /** + * The type of the memory pool, \ref CUpti_ActivityMemoryPoolType + */ + CUpti_ActivityMemoryPoolType memoryPoolType; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad2; +#endif + + /** + * The base address of the memory pool. + */ + uint64_t address; + + /** + * The release threshold of the memory pool in bytes. \p releaseThreshold is + * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType. + */ + uint64_t releaseThreshold; + + union { + /** + * The size of the memory pool in bytes. + * \p size is valid if \p memoryPoolType is + * CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType. + */ + uint64_t size; + + /** + * The processId of the memory pool. + * \p processId is valid if \p memoryPoolType is + * CUPTI_ACTIVITY_MEMORY_POOL_TYPE_IMPORTED, \ref CUpti_ActivityMemoryPoolType. + */ + uint64_t processId; + } pool; + } memoryPoolConfig; + +} CUpti_ActivityMemory2; + +/** + * \brief The activity record for memory. + * + * This activity record represents a memory allocation and free operation + * (CUPTI_ACTIVITY_KIND_MEMORY2). + * This activity record provides separate records for memory allocation and + * memory release operations. + * This allows to correlate the corresponding driver and runtime API + * activity record with the memory operation. + * + * Note: This activity record is an upgrade over \ref CUpti_ActivityMemory + * enabled using the kind \ref CUPTI_ACTIVITY_KIND_MEMORY. + * \ref CUpti_ActivityMemory provides a single record for the memory + * allocation and memory release operations. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMORY2 + */ + CUpti_ActivityKind kind; + + /** + * The memory operation requested by the user, \ref CUpti_ActivityMemoryOperationType. + */ + CUpti_ActivityMemoryOperationType memoryOperationType; + + /** + * The memory kind requested by the user, \ref CUpti_ActivityMemoryKind. + */ + CUpti_ActivityMemoryKind memoryKind; + + /** + * The correlation ID of the memory operation. Each memory operation is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver and runtime API activity record that + * launched the memory operation. + */ + uint32_t correlationId; + + /** + * The virtual address of the allocation. + */ + uint64_t address; + + /** + * The number of bytes of memory allocated. + */ + uint64_t bytes; + + /** + * The start timestamp for the memory operation, in ns. + */ + uint64_t timestamp; + + /** + * The program counter of the memory operation. + */ + uint64_t PC; + + /** + * The ID of the process to which this record belongs to. + */ + uint32_t processId; + + /** + * The ID of the device where the memory operation is taking place. + */ + uint32_t deviceId; + + /** + * The ID of the context. If context is NULL, \p contextId is set to CUPTI_INVALID_CONTEXT_ID. + */ + uint32_t contextId; + + /** + * The ID of the stream. If memory operation is not async, \p streamId is set to CUPTI_INVALID_STREAM_ID. + */ + uint32_t streamId; + + /** + * Variable name. This name is shared across all activity + * records representing the same symbol, and so should not be + * modified. + */ + const char* name; + + /** + * \p isAsync is set if memory operation happens through async memory APIs. + */ + uint32_t isAsync; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. 
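+ *
+ * Editor's sketch of reading the memory pool configuration reported by this
+ * record family, with m3 a hypothetical CUpti_ActivityMemory3 pointer; which
+ * union member is valid depends on memoryPoolType as documented above:
+ * \code
+ * if (m3->memoryPoolConfig.memoryPoolType == CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL)
+ *   printf("local pool size: %llu bytes\n",
+ *          (unsigned long long)m3->memoryPoolConfig.pool.size);
+ * else if (m3->memoryPoolConfig.memoryPoolType == CUPTI_ACTIVITY_MEMORY_POOL_TYPE_IMPORTED)
+ *   printf("pool imported from process %llu\n",
+ *          (unsigned long long)m3->memoryPoolConfig.pool.processId);
+ * \endcode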
+   */
+  uint32_t pad1;
+#endif
+
+  /**
+   * The memory pool configuration used for the memory operations.
+   */
+  struct PACKED_ALIGNMENT {
+    /**
+     * The type of the memory pool, \ref CUpti_ActivityMemoryPoolType
+     */
+    CUpti_ActivityMemoryPoolType memoryPoolType;
+
+#ifdef CUPTILP64
+    /**
+     * Undefined. Reserved for internal use.
+     */
+    uint32_t pad2;
+#endif
+
+    /**
+     * The base address of the memory pool.
+     */
+    uint64_t address;
+
+    /**
+     * The release threshold of the memory pool in bytes. \p releaseThreshold is
+     * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+     */
+    uint64_t releaseThreshold;
+
+    union {
+      /**
+       * The size of the memory pool in bytes.
+       * \p size is valid if \p memoryPoolType is
+       * CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+       */
+      uint64_t size;
+
+      /**
+       * The processId of the memory pool.
+       * \p processId is valid if \p memoryPoolType is
+       * CUPTI_ACTIVITY_MEMORY_POOL_TYPE_IMPORTED, \ref CUpti_ActivityMemoryPoolType.
+       */
+      uint64_t processId;
+    } pool;
+
+    /**
+     * The utilized size of the memory pool. \p utilizedSize is
+     * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+     */
+    uint64_t utilizedSize;
+  } memoryPoolConfig;
+
+} CUpti_ActivityMemory3;
+
+/**
+ * \brief The activity record for memory pool.
+ *
+ * This activity record represents memory pool creation, destruction and
+ * trimming (CUPTI_ACTIVITY_KIND_MEMORY_POOL).
+ * This activity record provides separate records for memory pool creation,
+ * destruction and trimming operations.
+ * This allows the corresponding driver and runtime API activity record
+ * to be correlated with the memory pool operation.
+ *
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMORY_POOL
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The memory pool operation requested by the user, \ref CUpti_ActivityMemoryPoolOperationType.
+   */
+  CUpti_ActivityMemoryPoolOperationType memoryPoolOperationType;
+
+  /**
+   * The type of the memory pool, \ref CUpti_ActivityMemoryPoolType
+   */
+  CUpti_ActivityMemoryPoolType memoryPoolType;
+
+  /**
+   * The correlation ID of the memory pool operation. Each memory pool
+   * operation is assigned a unique correlation ID that is identical to the
+   * correlation ID in the driver and runtime API activity record that
+   * launched the memory operation.
+   */
+  uint32_t correlationId;
+
+  /**
+   * The ID of the process to which this record belongs.
+   */
+  uint32_t processId;
+
+  /**
+   * The ID of the device where the memory pool is created.
+   */
+  uint32_t deviceId;
+
+  /**
+   * The minimum number of bytes to keep of the memory pool. \p minBytesToKeep is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_TRIMMED,
+   * \ref CUpti_ActivityMemoryPoolOperationType
+   */
+  size_t minBytesToKeep;
+
+#ifndef CUPTILP64
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint32_t pad;
+#endif
+
+  /**
+   * The virtual address of the allocation.
+   */
+  uint64_t address;
+
+  /**
+   * The size of the memory pool operation in bytes. \p size is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+   */
+  uint64_t size;
+
+  /**
+   * The release threshold of the memory pool. \p releaseThreshold is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+   */
+  uint64_t releaseThreshold;
+
+  /**
+   * The start timestamp for the memory operation, in ns.
+   */
+  uint64_t timestamp;
+} CUpti_ActivityMemoryPool;
+
+/**
+ * \brief The activity record for memory pool.
+ *
+ * This activity record represents memory pool creation, destruction and
+ * trimming (CUPTI_ACTIVITY_KIND_MEMORY_POOL).
+ * This activity record provides separate records for memory pool creation,
+ * destruction and trimming operations.
+ * This allows the corresponding driver and runtime API activity record
+ * to be correlated with the memory pool operation.
+ *
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_MEMORY_POOL
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The memory pool operation requested by the user, \ref CUpti_ActivityMemoryPoolOperationType.
+   */
+  CUpti_ActivityMemoryPoolOperationType memoryPoolOperationType;
+
+  /**
+   * The type of the memory pool, \ref CUpti_ActivityMemoryPoolType
+   */
+  CUpti_ActivityMemoryPoolType memoryPoolType;
+
+  /**
+   * The correlation ID of the memory pool operation. Each memory pool
+   * operation is assigned a unique correlation ID that is identical to the
+   * correlation ID in the driver and runtime API activity record that
+   * launched the memory operation.
+   */
+  uint32_t correlationId;
+
+  /**
+   * The ID of the process to which this record belongs.
+   */
+  uint32_t processId;
+
+  /**
+   * The ID of the device where the memory pool is created.
+   */
+  uint32_t deviceId;
+
+  /**
+   * The minimum number of bytes to keep of the memory pool. \p minBytesToKeep is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_OPERATION_TYPE_TRIMMED,
+   * \ref CUpti_ActivityMemoryPoolOperationType
+   */
+  size_t minBytesToKeep;
+
+#ifndef CUPTILP64
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint32_t pad;
+#endif
+
+  /**
+   * The virtual address of the allocation.
+   */
+  uint64_t address;
+
+  /**
+   * The size of the memory pool operation in bytes. \p size is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+   */
+  uint64_t size;
+
+  /**
+   * The release threshold of the memory pool. \p releaseThreshold is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+   */
+  uint64_t releaseThreshold;
+
+  /**
+   * The start timestamp for the memory operation, in ns.
+   */
+  uint64_t timestamp;
+
+  /**
+   * The utilized size of the memory pool. \p utilizedSize is
+   * valid for CUPTI_ACTIVITY_MEMORY_POOL_TYPE_LOCAL, \ref CUpti_ActivityMemoryPoolType.
+   */
+  uint64_t utilizedSize;
+} CUpti_ActivityMemoryPool2;
+
+/**
+ * \brief The activity record for kernel. (deprecated)
+ *
+ * This activity record represents a kernel execution
+ * (CUPTI_ACTIVITY_KIND_KERNEL and
+ * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) but is no longer generated
+ * by CUPTI. Kernel activities are now reported using the
+ * CUpti_ActivityKernel9 activity record.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL
+   * or CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The cache configuration requested by the kernel. The value is one
+   * of the CUfunc_cache enumeration values from cuda.h.
+   */
+  uint8_t cacheConfigRequested;
+
+  /**
+   * The cache configuration used for the kernel. The value is one of
+   * the CUfunc_cache enumeration values from cuda.h.
+   */
+  uint8_t cacheConfigExecuted;
+
+  /**
+   * The number of registers required for each thread executing the
+   * kernel.
+   */
+  uint16_t registersPerThread;
+
+  /**
+   * The start timestamp for the kernel execution, in ns.
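+   *
+   * Editor's note: records of this kernel family are produced once the
+   * kernel activity kind is enabled, e.g.
+   * \code
+   * cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL);
+   * \endcode
+   * Each record then carries the launch geometry (gridX/Y/Z, blockX/Y/Z),
+   * resource usage, and the timestamps described here.
+   *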
A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver API activity record that launched + * the kernel. + */ + uint32_t correlationId; + + /** + * The runtime correlation ID of the kernel. Each kernel execution + * is assigned a unique runtime correlation ID that is identical to + * the correlation ID in the runtime API activity record that + * launched the kernel. + */ + uint32_t runtimeCorrelationId; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; +} CUpti_ActivityKernel; + +/** + * \brief The activity record for kernel. (deprecated) + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) but is no longer generated + * by CUPTI. Kernel activities are now reported using the + * CUpti_ActivityKernel9 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. 
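+ *
+ * Editor's sketch of unpacking the 4-bit cache configuration fields above,
+ * with k a hypothetical CUpti_ActivityKernel2 pointer:
+ * \code
+ * CUfunc_cache requested = (CUfunc_cache)k->cacheConfig.config.requested;
+ * CUfunc_cache executed  = (CUfunc_cache)k->cacheConfig.config.executed;
+ * if (requested != executed) {
+ *   // the driver overrode the requested cache preference
+ * }
+ * \endcode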
+ */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. + */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; +} CUpti_ActivityKernel2; + +/** + * \brief The activity record for a kernel (CUDA 6.5(with sm_52 support) onwards). + * (deprecated in CUDA 9.0) + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL). + * Kernel activities are now reported using the CUpti_ActivityKernel9 activity + * record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. 
The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. + */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. 
+ */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; +} CUpti_ActivityKernel3; + +/** + * \brief The type of the CUDA kernel launch. + */ +typedef enum { + /** + * The kernel was launched via a regular kernel call + */ + CUPTI_ACTIVITY_LAUNCH_TYPE_REGULAR = 0, + + /** + * The kernel was launched via API \ref cudaLaunchCooperativeKernel() or + * \ref cuLaunchCooperativeKernel() + */ + CUPTI_ACTIVITY_LAUNCH_TYPE_COOPERATIVE_SINGLE_DEVICE = 1, + + /** + * The kernel was launched via API \ref cudaLaunchCooperativeKernelMultiDevice() or + * \ref cuLaunchCooperativeKernelMultiDevice() + */ + CUPTI_ACTIVITY_LAUNCH_TYPE_COOPERATIVE_MULTI_DEVICE = 2, + + /** + * The kernel was launched as a CBL commandlist + */ + CUPTI_ACTIVITY_LAUNCH_TYPE_CBL_COMMANDLIST = 3, +} CUpti_ActivityLaunchType; + +/** + * \brief The activity record for a kernel (CUDA 9.0(with sm_70 support) onwards). + * (deprecated in CUDA 11.0) + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL). + * Kernel activities are now reported using the CUpti_ActivityKernel9 activity + * record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + /** + * For devices with compute capability 7.0+ cacheConfig values are not updated + * in case field isSharedMemoryCarveoutRequested is set + */ + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. 
A value of CUPTI_TIMESTAMP_UNKNOWN indicates that
+   * the completion time is unknown.
+   */
+  uint64_t completed;
+
+  /**
+   * The ID of the device where the kernel is executing.
+   */
+  uint32_t deviceId;
+
+  /**
+   * The ID of the context where the kernel is executing.
+   */
+  uint32_t contextId;
+
+  /**
+   * The ID of the stream where the kernel is executing.
+   */
+  uint32_t streamId;
+
+  /**
+   * The X-dimension grid size for the kernel.
+   */
+  int32_t gridX;
+
+  /**
+   * The Y-dimension grid size for the kernel.
+   */
+  int32_t gridY;
+
+  /**
+   * The Z-dimension grid size for the kernel.
+   */
+  int32_t gridZ;
+
+  /**
+   * The X-dimension block size for the kernel.
+   */
+  int32_t blockX;
+
+  /**
+   * The Y-dimension block size for the kernel.
+   */
+  int32_t blockY;
+
+  /**
+   * The Z-dimension block size for the kernel.
+   */
+  int32_t blockZ;
+
+  /**
+   * The static shared memory allocated for the kernel, in bytes.
+   */
+  int32_t staticSharedMemory;
+
+  /**
+   * The dynamic shared memory reserved for the kernel, in bytes.
+   */
+  int32_t dynamicSharedMemory;
+
+  /**
+   * The amount of local memory reserved for each thread, in bytes.
+   */
+  uint32_t localMemoryPerThread;
+
+  /**
+   * The total amount of local memory reserved for the kernel, in
+   * bytes.
+   */
+  uint32_t localMemoryTotal;
+
+  /**
+   * The correlation ID of the kernel. Each kernel execution is
+   * assigned a unique correlation ID that is identical to the
+   * correlation ID in the driver or runtime API activity record that
+   * launched the kernel.
+   */
+  uint32_t correlationId;
+
+  /**
+   * The grid ID of the kernel. Each kernel is assigned a unique
+   * grid ID at runtime.
+   */
+  int64_t gridId;
+
+  /**
+   * The name of the kernel. This name is shared across all activity
+   * records representing the same kernel, and so should not be
+   * modified.
+   */
+  const char *name;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  void *reserved0;
+
+  /**
+   * The timestamp when the kernel is queued up in the command buffer, in ns.
+   * A value of CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time
+   * could not be collected for the kernel. This timestamp is not collected
+   * by default. Use API \ref cuptiActivityEnableLatencyTimestamps() to
+   * enable collection.
+   *
+   * The command buffer is a buffer written by the CUDA driver to send
+   * commands like kernel launch, memory copy, etc. to the GPU. All launches
+   * of CUDA kernels are asynchronous with respect to the host: the host
+   * requests the launch by writing commands into the command buffer, then
+   * returns without checking the GPU's progress.
+   */
+  uint64_t queued;
+
+  /**
+   * The timestamp when the command buffer containing the kernel launch
+   * is submitted to the GPU, in ns. A value of CUPTI_TIMESTAMP_UNKNOWN
+   * indicates that the submitted time could not be collected for the kernel.
+   * This timestamp is not collected by default. Use API \ref
+   * cuptiActivityEnableLatencyTimestamps() to enable collection.
+   */
+  uint64_t submitted;
+
+  /**
+   * This indicates whether the kernel was executed via a regular launch or via
+   * a single/multi device cooperative launch. \see CUpti_ActivityLaunchType
+   */
+  uint8_t launchType;
+
+  /**
+   * This indicates if CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT was
+   * updated for the kernel launch.
+   */
+  uint8_t isSharedMemoryCarveoutRequested;
+
+  /**
+   * Shared memory carveout value requested for the function, in percentage of
+   * the total resource. The value will be updated only if field
+   * isSharedMemoryCarveoutRequested is set.
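+   *
+   * Editor's sketch: the queued and submitted timestamps above are only
+   * collected after opting in, with k a hypothetical kernel record pointer:
+   * \code
+   * cuptiActivityEnableLatencyTimestamps(1);
+   * // later, for a kernel record k:
+   * if (k->queued != CUPTI_TIMESTAMP_UNKNOWN)
+   *   printf("launch latency: %llu ns\n",
+   *          (unsigned long long)(k->start - k->queued));
+   * \endcode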
+ */ + uint8_t sharedMemoryCarveoutRequested; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t padding; + + /** + * Shared memory size set by the driver. + */ + uint32_t sharedMemoryExecuted; +} CUpti_ActivityKernel4; + +/** + * \brief The shared memory limit per block config for a kernel + * This should be used to set 'cudaOccFuncShmemConfig' field in occupancy calculator API + */ +typedef enum { + /** The shared memory limit config is default + */ + CUPTI_FUNC_SHMEM_LIMIT_DEFAULT = 0x00, + + /** User has opted for a higher dynamic shared memory limit using function attribute + * 'cudaFuncAttributeMaxDynamicSharedMemorySize' for runtime API or + * CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES for driver API + */ + CUPTI_FUNC_SHMEM_LIMIT_OPTIN = 0x01, + + CUPTI_FUNC_SHMEM_LIMIT_FORCE_INT = 0x7fffffff +} CUpti_FuncShmemLimitConfig; + +/** + * \brief The activity record for a kernel (CUDA 11.0(with sm_80 support) onwards). + * (deprecated in CUDA 11.2) + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) but is no longer generated + * by CUPTI. Kernel activities are now reported using the + * CUpti_ActivityKernel9 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + /** + * For devices with compute capability 7.0+ cacheConfig values are not updated + * in case field isSharedMemoryCarveoutRequested is set + */ + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. 
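For context on CUPTI_FUNC_SHMEM_LIMIT_OPTIN above: the opt-in happens on the application side through a function attribute. A minimal runtime-API sketch with an illustrative kernel of our own; the 96 KB figure is an assumption and must not exceed the device's opt-in limit:

#include <cuda_runtime.h>

__global__ void bigSmemKernel(float *data) { /* uses dynamic shared memory */ }

void OptInToLargeSharedMemory(void) {
  /* Raise the per-launch dynamic shared memory cap for this kernel; a
     subsequent launch would then be reported by CUPTI with
     shmemLimitConfig == CUPTI_FUNC_SHMEM_LIMIT_OPTIN. */
  cudaFuncSetAttribute(bigSmemKernel,
                       cudaFuncAttributeMaxDynamicSharedMemorySize,
                       96 * 1024);
  bigSmemKernel<<<1, 256, 96 * 1024>>>(NULL);
}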
A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. + */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The timestamp when the kernel is queued up in the command buffer, in ns. + * A value of CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time + * could not be collected for the kernel. This timestamp is not collected + * by default. Use API \ref cuptiActivityEnableLatencyTimestamps() to + * enable collection. + * + * Command buffer is a buffer written by CUDA driver to send commands + * like kernel launch, memory copy etc to the GPU. All launches of CUDA + * kernels are asynchrnous with respect to the host, the host requests + * the launch by writing commands into the command buffer, then returns + * without checking the GPU's progress. + */ + uint64_t queued; + + /** + * The timestamp when the command buffer containing the kernel launch + * is submitted to the GPU, in ns. A value of CUPTI_TIMESTAMP_UNKNOWN + * indicates that the submitted time could not be collected for the kernel. + * This timestamp is not collected by default. Use API \ref + * cuptiActivityEnableLatencyTimestamps() to enable collection. + */ + uint64_t submitted; + + /** + * The indicates if the kernel was executed via a regular launch or via a + * single/multi device cooperative launch. \see CUpti_ActivityLaunchType + */ + uint8_t launchType; + + /** + * This indicates if CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT was + * updated for the kernel launch + */ + uint8_t isSharedMemoryCarveoutRequested; + + /** + * Shared memory carveout value requested for the function in percentage of + * the total resource. The value will be updated only if field + * isSharedMemoryCarveoutRequested is set. 
+ */ + uint8_t sharedMemoryCarveoutRequested; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t padding; + + /** + * Shared memory size set by the driver. + */ + uint32_t sharedMemoryExecuted; + + /** + * The unique ID of the graph node that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint64_t graphNodeId; + + /** + * The shared memory limit config for the kernel. This field shows whether user has opted for a + * higher per block limit of dynamic shared memory. + */ + CUpti_FuncShmemLimitConfig shmemLimitConfig; + + /** + * The unique ID of the graph that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint32_t graphId; +} CUpti_ActivityKernel5; + +/** + * \brief The activity record for kernel. (deprecated in CUDA 11.6) + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) but is no longer generated + * by CUPTI. Kernel activities are now reported using the + * CUpti_ActivityKernel9 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + /** + * For devices with compute capability 7.0+ cacheConfig values are not updated + * in case field isSharedMemoryCarveoutRequested is set + */ + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. 
A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. + */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The timestamp when the kernel is queued up in the command buffer, in ns. + * A value of CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time + * could not be collected for the kernel. This timestamp is not collected + * by default. Use API \ref cuptiActivityEnableLatencyTimestamps() to + * enable collection. + * + * Command buffer is a buffer written by CUDA driver to send commands + * like kernel launch, memory copy etc to the GPU. All launches of CUDA + * kernels are asynchrnous with respect to the host, the host requests + * the launch by writing commands into the command buffer, then returns + * without checking the GPU's progress. + */ + uint64_t queued; + + /** + * The timestamp when the command buffer containing the kernel launch + * is submitted to the GPU, in ns. A value of CUPTI_TIMESTAMP_UNKNOWN + * indicates that the submitted time could not be collected for the kernel. + * This timestamp is not collected by default. Use API \ref + * cuptiActivityEnableLatencyTimestamps() to enable collection. + */ + uint64_t submitted; + + /** + * The indicates if the kernel was executed via a regular launch or via a + * single/multi device cooperative launch. \see CUpti_ActivityLaunchType + */ + uint8_t launchType; + + /** + * This indicates if CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT was + * updated for the kernel launch + */ + uint8_t isSharedMemoryCarveoutRequested; + + /** + * Shared memory carveout value requested for the function in percentage of + * the total resource. The value will be updated only if field + * isSharedMemoryCarveoutRequested is set. 
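Since the queued and submitted timestamps documented in these kernel records are not collected by default, a tool must opt in first. A minimal sketch, with error checking elided and the field layout taken from the kernel records in this header:

#include <stdio.h>
#include <cupti.h>

void EnableLaunchLatencyTimestamps(void) {
  /* Must be enabled before the kernel activity kind for the
     queued/submitted fields to be populated. */
  cuptiActivityEnableLatencyTimestamps(1);
  cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL);
}

void PrintLaunchLatency(const CUpti_ActivityKernel9 *k) {
  if (k->queued != CUPTI_TIMESTAMP_UNKNOWN &&
      k->submitted != CUPTI_TIMESTAMP_UNKNOWN) {
    printf("queue->submit %llu ns, submit->start %llu ns\n",
           (unsigned long long)(k->submitted - k->queued),
           (unsigned long long)(k->start - k->submitted));
  }
}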
+ */ + uint8_t sharedMemoryCarveoutRequested; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t padding; + + /** + * Shared memory size set by the driver. + */ + uint32_t sharedMemoryExecuted; + + /** + * The unique ID of the graph node that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint64_t graphNodeId; + + /** + * The shared memory limit config for the kernel. This field shows whether user has opted for a + * higher per block limit of dynamic shared memory. + */ + CUpti_FuncShmemLimitConfig shmemLimitConfig; + + /** + * The unique ID of the graph that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint32_t graphId; + + /** + * The pointer to the access policy window. The structure CUaccessPolicyWindow is + * defined in cuda.h. + */ + CUaccessPolicyWindow *pAccessPolicyWindow; +} CUpti_ActivityKernel6; + +/** + * \brief The activity record for kernel. (deprecated in CUDA 11.8) + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) but is no longer generated + * by CUPTI. Kernel activities are now reported using the + * CUpti_ActivityKernel9 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + /** + * For devices with compute capability 7.0+ cacheConfig values are not updated + * in case field isSharedMemoryCarveoutRequested is set + */ + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. 
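For context on the pAccessPolicyWindow field above: the window originates on the application side, typically attached to a stream before launch. A runtime-API sketch under assumed values (the 60% hit ratio is illustrative):

#include <string.h>
#include <cuda_runtime.h>

void AttachAccessPolicyWindow(cudaStream_t stream, void *buf, size_t bytes) {
  cudaStreamAttrValue attr;
  memset(&attr, 0, sizeof(attr));
  attr.accessPolicyWindow.base_ptr  = buf;
  attr.accessPolicyWindow.num_bytes = bytes;  /* should fit the L2 set-aside */
  attr.accessPolicyWindow.hitRatio  = 0.6f;   /* fraction given hitProp */
  attr.accessPolicyWindow.hitProp   = cudaAccessPropertyPersisting;
  attr.accessPolicyWindow.missProp  = cudaAccessPropertyStreaming;
  /* Kernels subsequently launched into this stream may then surface the
     window through the CUPTI kernel record's pAccessPolicyWindow. */
  cudaStreamSetAttribute(stream, cudaStreamAttributeAccessPolicyWindow, &attr);
}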
+ */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. + */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The timestamp when the kernel is queued up in the command buffer, in ns. + * A value of CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time + * could not be collected for the kernel. This timestamp is not collected + * by default. Use API \ref cuptiActivityEnableLatencyTimestamps() to + * enable collection. + * + * Command buffer is a buffer written by CUDA driver to send commands + * like kernel launch, memory copy etc to the GPU. All launches of CUDA + * kernels are asynchrnous with respect to the host, the host requests + * the launch by writing commands into the command buffer, then returns + * without checking the GPU's progress. + */ + uint64_t queued; + + /** + * The timestamp when the command buffer containing the kernel launch + * is submitted to the GPU, in ns. A value of CUPTI_TIMESTAMP_UNKNOWN + * indicates that the submitted time could not be collected for the kernel. + * This timestamp is not collected by default. Use API \ref + * cuptiActivityEnableLatencyTimestamps() to enable collection. + */ + uint64_t submitted; + + /** + * The indicates if the kernel was executed via a regular launch or via a + * single/multi device cooperative launch. 
\see CUpti_ActivityLaunchType + */ + uint8_t launchType; + + /** + * This indicates if CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT was + * updated for the kernel launch + */ + uint8_t isSharedMemoryCarveoutRequested; + + /** + * Shared memory carveout value requested for the function in percentage of + * the total resource. The value will be updated only if field + * isSharedMemoryCarveoutRequested is set. + */ + uint8_t sharedMemoryCarveoutRequested; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t padding; + + /** + * Shared memory size set by the driver. + */ + uint32_t sharedMemoryExecuted; + + /** + * The unique ID of the graph node that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint64_t graphNodeId; + + /** + * The shared memory limit config for the kernel. This field shows whether user has opted for a + * higher per block limit of dynamic shared memory. + */ + CUpti_FuncShmemLimitConfig shmemLimitConfig; + + /** + * The unique ID of the graph that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint32_t graphId; + + /** + * The pointer to the access policy window. The structure CUaccessPolicyWindow is + * defined in cuda.h. + */ + CUaccessPolicyWindow *pAccessPolicyWindow; + + /** + * The ID of the HW channel on which the kernel is launched. + */ + uint32_t channelID; + + /** + * The type of the channel + */ + CUpti_ChannelType channelType; +} CUpti_ActivityKernel7; + +/** + * \brief The activity record for kernel. + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + /** + * For devices with compute capability 7.0+ cacheConfig values are not updated + * in case field isSharedMemoryCarveoutRequested is set + */ + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. 
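All of the kernel records in this header are delivered through CUPTI's asynchronous buffer API; the canonical consumption pattern is a buffer-completed callback that walks the records and dispatches on kind. A minimal sketch, with the buffer-request callback and error checking elided:

#include <stdio.h>
#include <stdlib.h>
#include <cupti.h>

static void CUPTIAPI BufferCompleted(CUcontext ctx, uint32_t streamId,
                                     uint8_t *buffer, size_t size,
                                     size_t validSize) {
  CUpti_Activity *record = NULL;
  /* Iterate until CUPTI reports no more records in this buffer. */
  while (cuptiActivityGetNextRecord(buffer, validSize, &record) == CUPTI_SUCCESS) {
    if (record->kind == CUPTI_ACTIVITY_KIND_KERNEL ||
        record->kind == CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) {
      const CUpti_ActivityKernel9 *k = (const CUpti_ActivityKernel9 *)record;
      printf("%s: %llu ns\n", k->name,
             (unsigned long long)(k->end - k->start));
    }
  }
  free(buffer);  /* assumes the request callback malloc'd it */
}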
+ */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes (deprecated in CUDA 11.8). + * Refer field localMemoryTotal_v2 + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. + */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The timestamp when the kernel is queued up in the command buffer, in ns. + * A value of CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time + * could not be collected for the kernel. This timestamp is not collected + * by default. Use API \ref cuptiActivityEnableLatencyTimestamps() to + * enable collection. + * + * Command buffer is a buffer written by CUDA driver to send commands + * like kernel launch, memory copy etc to the GPU. All launches of CUDA + * kernels are asynchrnous with respect to the host, the host requests + * the launch by writing commands into the command buffer, then returns + * without checking the GPU's progress. + */ + uint64_t queued; + + /** + * The timestamp when the command buffer containing the kernel launch + * is submitted to the GPU, in ns. 
A value of CUPTI_TIMESTAMP_UNKNOWN + * indicates that the submitted time could not be collected for the kernel. + * This timestamp is not collected by default. Use API \ref + * cuptiActivityEnableLatencyTimestamps() to enable collection. + */ + uint64_t submitted; + + /** + * The indicates if the kernel was executed via a regular launch or via a + * single/multi device cooperative launch. \see CUpti_ActivityLaunchType + */ + uint8_t launchType; + + /** + * This indicates if CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT was + * updated for the kernel launch + */ + uint8_t isSharedMemoryCarveoutRequested; + + /** + * Shared memory carveout value requested for the function in percentage of + * the total resource. The value will be updated only if field + * isSharedMemoryCarveoutRequested is set. + */ + uint8_t sharedMemoryCarveoutRequested; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t padding; + + /** + * Shared memory size set by the driver. + */ + uint32_t sharedMemoryExecuted; + + /** + * The unique ID of the graph node that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint64_t graphNodeId; + + /** + * The shared memory limit config for the kernel. This field shows whether user has opted for a + * higher per block limit of dynamic shared memory. + */ + CUpti_FuncShmemLimitConfig shmemLimitConfig; + + /** + * The unique ID of the graph that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint32_t graphId; + + /** + * The pointer to the access policy window. The structure CUaccessPolicyWindow is + * defined in cuda.h. + */ + CUaccessPolicyWindow *pAccessPolicyWindow; + + /** + * The ID of the HW channel on which the kernel is launched. + */ + uint32_t channelID; + + /** + * The type of the channel + */ + CUpti_ChannelType channelType; + + /** + * The X-dimension cluster size for the kernel. + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterX; + + /** + * The Y-dimension cluster size for the kernel. + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterY; + + /** + * The Z-dimension cluster size for the kernel. + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterZ; + + /** + * The cluster scheduling policy for the kernel. Refer CUclusterSchedulingPolicy + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterSchedulingPolicy; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint64_t localMemoryTotal_v2; +} CUpti_ActivityKernel8; + +/** + * \brief The activity record for kernel. + * + * This activity record represents a kernel execution + * (CUPTI_ACTIVITY_KIND_KERNEL and + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_KERNEL or + * CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL. + */ + CUpti_ActivityKind kind; + + /** + * For devices with compute capability 7.0+ cacheConfig values are not updated + * in case field isSharedMemoryCarveoutRequested is set + */ + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. 
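The clusterX/Y/Z fields above are only populated for thread block cluster launches (compute capability 9.0 and higher). A runtime-API sketch of such a launch, with an illustrative kernel of our own:

#include <cuda_runtime.h>

__global__ void clusterKernel(void) { /* ... */ }

void LaunchWithCluster(void) {
  cudaLaunchConfig_t cfg = {};
  cfg.gridDim  = dim3(8, 1, 1);    /* must be divisible by the cluster dims */
  cfg.blockDim = dim3(128, 1, 1);

  /* Request a 2x1x1 cluster; the CUPTI kernel record for this launch
     would then report clusterX == 2, clusterY == 1, clusterZ == 1. */
  cudaLaunchAttribute attr;
  attr.id = cudaLaunchAttributeClusterDimension;
  attr.val.clusterDim.x = 2;
  attr.val.clusterDim.y = 1;
  attr.val.clusterDim.z = 1;
  cfg.attrs    = &attr;
  cfg.numAttrs = 1;

  cudaLaunchKernelEx(&cfg, clusterKernel);
}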
+ */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The partitioned global caching requested for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheRequested; + + /** + * The partitioned global caching executed for the kernel. Partitioned + * global caching is required to enable caching on certain chips, such as + * devices with compute capability 5.2. Partitioned global caching can be + * automatically disabled if the occupancy requirement of the launch cannot + * support caching. + */ + CUpti_ActivityPartitionedGlobalCacheConfig partitionedGlobalCacheExecuted; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The completed timestamp for the kernel execution, in ns. It + * represents the completion of all it's child kernels and the + * kernel itself. A value of CUPTI_TIMESTAMP_UNKNOWN indicates that + * the completion time is unknown. + */ + uint64_t completed; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes (deprecated in CUDA 11.8). + * Refer field localMemoryTotal_v2 + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver or runtime API activity record that + * launched the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. Each kernel is assigned a unique + * grid ID at runtime. 
+ */ + int64_t gridId; + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; + + /** + * Undefined. Reserved for internal use. + */ + void *reserved0; + + /** + * The timestamp when the kernel is queued up in the command buffer, in ns. + * A value of CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time + * could not be collected for the kernel. This timestamp is not collected + * by default. Use API \ref cuptiActivityEnableLatencyTimestamps() to + * enable collection. + * + * The command buffer is a buffer written by the CUDA driver to send commands + * such as kernel launch and memory copy to the GPU. All launches of CUDA + * kernels are asynchronous with respect to the host: the host requests + * the launch by writing commands into the command buffer, then returns + * without checking the GPU's progress. + */ + uint64_t queued; + + /** + * The timestamp when the command buffer containing the kernel launch + * is submitted to the GPU, in ns. A value of CUPTI_TIMESTAMP_UNKNOWN + * indicates that the submitted time could not be collected for the kernel. + * This timestamp is not collected by default. Use API \ref + * cuptiActivityEnableLatencyTimestamps() to enable collection. + */ + uint64_t submitted; + + /** + * This field indicates whether the kernel was executed via a regular launch or via a + * single/multi device cooperative launch. \see CUpti_ActivityLaunchType + */ + uint8_t launchType; + + /** + * This indicates whether CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT was + * updated for the kernel launch. + */ + uint8_t isSharedMemoryCarveoutRequested; + + /** + * Shared memory carveout value requested for the function, as a percentage of + * the total resource. The value will be updated only if the field + * isSharedMemoryCarveoutRequested is set. + */ + uint8_t sharedMemoryCarveoutRequested; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t padding; + + /** + * Shared memory size set by the driver. + */ + uint32_t sharedMemoryExecuted; + + /** + * The unique ID of the graph node that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint64_t graphNodeId; + + /** + * The shared memory limit config for the kernel. This field shows whether the user has opted for a + * higher per-block limit of dynamic shared memory. + */ + CUpti_FuncShmemLimitConfig shmemLimitConfig; + + /** + * The unique ID of the graph that launched this kernel through graph launch APIs. + * This field will be 0 if the kernel is not launched through graph launch APIs. + */ + uint32_t graphId; + + /** + * The pointer to the access policy window. The structure CUaccessPolicyWindow is + * defined in cuda.h. + */ + CUaccessPolicyWindow *pAccessPolicyWindow; + + /** + * The ID of the HW channel on which the kernel is launched. + */ + uint32_t channelID; + + /** + * The type of the channel. + */ + CUpti_ChannelType channelType; + + /** + * The X-dimension cluster size for the kernel. + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterX; + + /** + * The Y-dimension cluster size for the kernel. + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterY; + + /** + * The Z-dimension cluster size for the kernel.
+ * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterZ; + + /** + * The cluster scheduling policy for the kernel. Refer CUclusterSchedulingPolicy + * Field is valid for devices with compute capability 9.0 and higher + */ + uint32_t clusterSchedulingPolicy; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint64_t localMemoryTotal_v2; + + /** + * The maximum cluster size for the kernel + */ + uint32_t maxPotentialClusterSize; + + /** + * The maximum clusters that could co-exist on the target device for the kernel + */ + uint32_t maxActiveClusters; +} CUpti_ActivityKernel9; + + +/** + * \brief The activity record for CDP (CUDA Dynamic Parallelism) + * kernel. + * + * This activity record represents a CDP kernel execution. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_CDP_KERNEL + */ + CUpti_ActivityKind kind; + + union { + uint8_t both; + struct { + /** + * The cache configuration requested by the kernel. The value is one + * of the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t requested:4; + + /** + * The cache configuration used for the kernel. The value is one of + * the CUfunc_cache enumeration values from cuda.h. + */ + uint8_t executed:4; + } config; + } cacheConfig; + + /** + * The shared memory configuration used for the kernel. The value is one of + * the CUsharedconfig enumeration values from cuda.h. + */ + uint8_t sharedMemoryConfig; + + /** + * The number of registers required for each thread executing the + * kernel. + */ + uint16_t registersPerThread; + + /** + * The start timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t start; + + /** + * The end timestamp for the kernel execution, in ns. A value of 0 + * for both the start and end timestamps indicates that timestamp + * information could not be collected for the kernel. + */ + uint64_t end; + + /** + * The ID of the device where the kernel is executing. + */ + uint32_t deviceId; + + /** + * The ID of the context where the kernel is executing. + */ + uint32_t contextId; + + /** + * The ID of the stream where the kernel is executing. + */ + uint32_t streamId; + + /** + * The X-dimension grid size for the kernel. + */ + int32_t gridX; + + /** + * The Y-dimension grid size for the kernel. + */ + int32_t gridY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t gridZ; + + /** + * The X-dimension block size for the kernel. + */ + int32_t blockX; + + /** + * The Y-dimension block size for the kernel. + */ + int32_t blockY; + + /** + * The Z-dimension grid size for the kernel. + */ + int32_t blockZ; + + /** + * The static shared memory allocated for the kernel, in bytes. + */ + int32_t staticSharedMemory; + + /** + * The dynamic shared memory reserved for the kernel, in bytes. + */ + int32_t dynamicSharedMemory; + + /** + * The amount of local memory reserved for each thread, in bytes. + */ + uint32_t localMemoryPerThread; + + /** + * The total amount of local memory reserved for the kernel, in + * bytes. + */ + uint32_t localMemoryTotal; + + /** + * The correlation ID of the kernel. Each kernel execution is + * assigned a unique correlation ID that is identical to the + * correlation ID in the driver API activity record that launched + * the kernel. + */ + uint32_t correlationId; + + /** + * The grid ID of the kernel. 
Each kernel execution + * is assigned a unique grid ID. + */ + int64_t gridId; + + /** + * The grid ID of the parent kernel. + */ + int64_t parentGridId; + + /** + * The timestamp when kernel is queued up, in ns. A value of + * CUPTI_TIMESTAMP_UNKNOWN indicates that the queued time is + * unknown. + */ + uint64_t queued; + + /** + * The timestamp when kernel is submitted to the gpu, in ns. A value + * of CUPTI_TIMESTAMP_UNKNOWN indicates that the submission time is + * unknown. + */ + uint64_t submitted; + + /** + * The timestamp when kernel is marked as completed, in ns. A value + * of CUPTI_TIMESTAMP_UNKNOWN indicates that the completion time is + * unknown. + */ + uint64_t completed; + + /** + * The X-dimension of the parent block. + */ + uint32_t parentBlockX; + + /** + * The Y-dimension of the parent block. + */ + uint32_t parentBlockY; + + /** + * The Z-dimension of the parent block. + */ + uint32_t parentBlockZ; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The name of the kernel. This name is shared across all activity + * records representing the same kernel, and so should not be + * modified. + */ + const char *name; +} CUpti_ActivityCdpKernel; + +/** + * \brief The activity record for a preemption of a CDP kernel. + * + * This activity record represents a preemption of a CDP kernel. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_PREEMPTION + */ + CUpti_ActivityKind kind; + + /** + * kind of the preemption + */ + CUpti_ActivityPreemptionKind preemptionKind; + + /** + * The timestamp of the preemption, in ns. A value of 0 indicates + * that timestamp information could not be collected for the + * preemption. + */ + uint64_t timestamp; + + /** + * The grid-id of the block that is preempted + */ + int64_t gridId; + + /** + * The X-dimension of the block that is preempted + */ + uint32_t blockX; + + /** + * The Y-dimension of the block that is preempted + */ + uint32_t blockY; + + /** + * The Z-dimension of the block that is preempted + */ + uint32_t blockZ; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +} CUpti_ActivityPreemption; + +/** + * \brief The activity record for a driver or runtime API invocation. + * + * This activity record represents an invocation of a driver or + * runtime API (CUPTI_ACTIVITY_KIND_DRIVER and + * CUPTI_ACTIVITY_KIND_RUNTIME). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_DRIVER, + * CUPTI_ACTIVITY_KIND_RUNTIME, or CUPTI_ACTIVITY_KIND_INTERNAL_LAUNCH_API. + */ + CUpti_ActivityKind kind; + + /** + * The ID of the driver or runtime function. + */ + CUpti_CallbackId cbid; + + /** + * The start timestamp for the function, in ns. A value of 0 for + * both the start and end timestamps indicates that timestamp + * information could not be collected for the function. + */ + uint64_t start; + + /** + * The end timestamp for the function, in ns. A value of 0 for both + * the start and end timestamps indicates that timestamp information + * could not be collected for the function. + */ + uint64_t end; + + /** + * The ID of the process where the driver or runtime CUDA function + * is executing. + */ + uint32_t processId; + + /** + * The ID of the thread where the driver or runtime CUDA function is + * executing. + */ + uint32_t threadId; + + /** + * The correlation ID of the driver or runtime CUDA function. 
Each + * function invocation is assigned a unique correlation ID that is + * identical to the correlation ID in the memcpy, memset, or kernel + * activity record that is associated with this function. + */ + uint32_t correlationId; + + /** + * The return value for the function. For a CUDA driver function + * this will be a CUresult value, and for a CUDA runtime function + * this will be a cudaError_t value. + */ + uint32_t returnValue; +} CUpti_ActivityAPI; + +/** + * \brief The activity record for a CUPTI event. + * + * This activity record represents a CUPTI event value + * (CUPTI_ACTIVITY_KIND_EVENT). This activity record kind is not + * produced by the activity API but is included for completeness and + * ease-of-use. Profile frameworks built on top of CUPTI that collect + * event data may choose to use this type to store the collected event + * data. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_EVENT. + */ + CUpti_ActivityKind kind; + + /** + * The event ID. + */ + CUpti_EventID id; + + /** + * The event value. + */ + uint64_t value; + + /** + * The event domain ID. + */ + CUpti_EventDomainID domain; + + /** + * The correlation ID of the event. Use of this ID is user-defined, + * but typically this ID value will equal the correlation ID of the + * kernel for which the event was gathered. + */ + uint32_t correlationId; +} CUpti_ActivityEvent; + +/** + * \brief The activity record for a CUPTI event with instance + * information. + * + * This activity record represents a CUPTI event value for a + * specific event domain instance + * (CUPTI_ACTIVITY_KIND_EVENT_INSTANCE). This activity record kind is + * not produced by the activity API but is included for completeness + * and ease-of-use. Profile frameworks built on top of CUPTI that + * collect event data may choose to use this type to store the + * collected event data. This activity record should be used when + * event domain instance information needs to be associated with the + * event. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be + * CUPTI_ACTIVITY_KIND_EVENT_INSTANCE. + */ + CUpti_ActivityKind kind; + + /** + * The event ID. + */ + CUpti_EventID id; + + /** + * The event domain ID. + */ + CUpti_EventDomainID domain; + + /** + * The event domain instance. + */ + uint32_t instance; + + /** + * The event value. + */ + uint64_t value; + + /** + * The correlation ID of the event. Use of this ID is user-defined, + * but typically this ID value will equal the correlation ID of the + * kernel for which the event was gathered. + */ + uint32_t correlationId; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +} CUpti_ActivityEventInstance; + +/** + * \brief The activity record for a CUPTI metric. + * + * This activity record represents the collection of a CUPTI metric + * value (CUPTI_ACTIVITY_KIND_METRIC). This activity record kind is not + * produced by the activity API but is included for completeness and + * ease-of-use. Profile frameworks built on top of CUPTI that collect + * metric data may choose to use this type to store the collected metric + * data. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_METRIC. + */ + CUpti_ActivityKind kind; + + /** + * The metric ID. + */ + CUpti_MetricID id; + + /** + * The metric value. + */ + CUpti_MetricValue value; + + /** + * The correlation ID of the metric. Use of this ID is user-defined, + * but typically this ID value will equal the correlation ID of the + * kernel for which the metric was gathered. + */ + uint32_t correlationId;
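The correlationId shared by CUpti_ActivityAPI and the kernel records is the join key between host-side and device-side activity. A toy sketch of the pairing; a real tool would use a proper hash map rather than this fixed table:

#include <stdint.h>
#include <cupti.h>

#define SLOTS 4096
static uint64_t apiStartNs[SLOTS];  /* correlation ID -> API start time */

void OnActivityRecord(const CUpti_Activity *record) {
  if (record->kind == CUPTI_ACTIVITY_KIND_RUNTIME ||
      record->kind == CUPTI_ACTIVITY_KIND_DRIVER) {
    const CUpti_ActivityAPI *api = (const CUpti_ActivityAPI *)record;
    apiStartNs[api->correlationId % SLOTS] = api->start;
  } else if (record->kind == CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL) {
    const CUpti_ActivityKernel9 *k = (const CUpti_ActivityKernel9 *)record;
    /* The gap between the API call's start and the kernel's GPU start
       approximates the end-to-end launch latency. */
    uint64_t hostStart = apiStartNs[k->correlationId % SLOTS];
    (void)hostStart;
  }
}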
+ + /** + * The properties of this metric. \see CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t pad[3]; +} CUpti_ActivityMetric; + +/** + * \brief The activity record for a CUPTI metric with instance + * information. + * + * This activity record represents a CUPTI metric value + * for a specific metric domain instance + * (CUPTI_ACTIVITY_KIND_METRIC_INSTANCE). This activity record kind + * is not produced by the activity API but is included for + * completeness and ease-of-use. Profile frameworks built on top of + * CUPTI that collect metric data may choose to use this type to store + * the collected metric data. This activity record should be used when + * metric domain instance information needs to be associated with the + * metric. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be + * CUPTI_ACTIVITY_KIND_METRIC_INSTANCE. + */ + CUpti_ActivityKind kind; + + /** + * The metric ID. + */ + CUpti_MetricID id; + + /** + * The metric value. + */ + CUpti_MetricValue value; + + /** + * The metric domain instance. + */ + uint32_t instance; + + /** + * The correlation ID of the metric. Use of this ID is user-defined, + * but typically this ID value will equal the correlation ID of the + * kernel for which the metric was gathered. + */ + uint32_t correlationId; + + /** + * The properties of this metric. \see CUpti_ActivityFlag + */ + uint8_t flags; + + /** + * Undefined. Reserved for internal use. + */ + uint8_t pad[7]; +} CUpti_ActivityMetricInstance; + +/** + * \brief The activity record for source locator. + * + * This activity record represents a source locator + * (CUPTI_ACTIVITY_KIND_SOURCE_LOCATOR). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_SOURCE_LOCATOR. + */ + CUpti_ActivityKind kind; + + /** + * The ID for the source path, will be used in all the source level + * results. + */ + uint32_t id; + + /** + * The line number in the source file. + */ + uint32_t lineNumber; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The path for the file. + */ + const char *fileName; +} CUpti_ActivitySourceLocator; + +/** + * \brief The activity record for source-level global + * access. (deprecated) + * + * This activity records the locations of the global + * accesses in the source (CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS). + * Global access activities are now reported using the + * CUpti_ActivityGlobalAccess3 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS. + */ + CUpti_ActivityKind kind; + + /** + * The properties of this global access. + */ + CUpti_ActivityFlag flags; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * The pc offset for the access. + */ + uint32_t pcOffset; + + /** + * The number of times this instruction was executed per warp. It will be incremented + * when at least one thread in the warp is active with predicate and condition code + * evaluating to true.
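Source-level records such as the global-access records here carry only a sourceLocatorId, so tools cache CUpti_ActivitySourceLocator records as they arrive and resolve the IDs later. A toy sketch, using a fixed-size table for brevity:

#include <stdio.h>
#include <cupti.h>

#define MAX_LOCATORS 1024
static const char *locFile[MAX_LOCATORS];
static uint32_t    locLine[MAX_LOCATORS];

void OnSourceLevelRecord(const CUpti_Activity *record) {
  if (record->kind == CUPTI_ACTIVITY_KIND_SOURCE_LOCATOR) {
    const CUpti_ActivitySourceLocator *s =
        (const CUpti_ActivitySourceLocator *)record;
    if (s->id < MAX_LOCATORS) {
      locFile[s->id] = s->fileName;   /* string is owned by CUPTI */
      locLine[s->id] = s->lineNumber;
    }
  } else if (record->kind == CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS) {
    const CUpti_ActivityGlobalAccess3 *g =
        (const CUpti_ActivityGlobalAccess3 *)record;
    if (g->sourceLocatorId < MAX_LOCATORS && locFile[g->sourceLocatorId])
      printf("global access at %s:%u\n",
             locFile[g->sourceLocatorId], locLine[g->sourceLocatorId]);
  }
}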
+ */ + uint32_t executed; + + /** + * This increments each time when this instruction is executed by number + * of threads that executed this instruction with predicate and condition code evaluating to true. + */ + uint64_t threadsExecuted; + + /** + * The total number of 32 bytes transactions to L2 cache generated by this access + */ + uint64_t l2_transactions; +} CUpti_ActivityGlobalAccess; + +/** + * \brief The activity record for source-level global + * access. (deprecated in CUDA 9.0) + * + * This activity records the locations of the global + * accesses in the source (CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS). + * Global access activities are now reported using the + * CUpti_ActivityGlobalAccess3 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS. + */ + CUpti_ActivityKind kind; + + /** + * The properties of this global access. + */ + CUpti_ActivityFlag flags; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * Correlation ID with global/device function name + */ + uint32_t functionId; + + /** + * The pc offset for the access. + */ + uint32_t pcOffset; + + /** + * This increments each time when this instruction is executed by number + * of threads that executed this instruction with predicate and condition code evaluating to true. + */ + uint64_t threadsExecuted; + + /** + * The total number of 32 bytes transactions to L2 cache generated by this access + */ + uint64_t l2_transactions; + + /** + * The minimum number of L2 transactions possible based on the access pattern. + */ + uint64_t theoreticalL2Transactions; + + /** + * The number of times this instruction was executed per warp. It will be incremented + * when at least one of thread among warp is active with predicate and condition code + * evaluating to true. + */ + uint32_t executed; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +} CUpti_ActivityGlobalAccess2; + +/** + * \brief The activity record for source-level global + * access. + * + * This activity records the locations of the global + * accesses in the source (CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_GLOBAL_ACCESS. + */ + CUpti_ActivityKind kind; + + /** + * The properties of this global access. + */ + CUpti_ActivityFlag flags; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * Correlation ID with global/device function name + */ + uint32_t functionId; + + /** + * The number of times this instruction was executed per warp. It will be incremented + * when at least one of thread among warp is active with predicate and condition code + * evaluating to true. + */ + uint32_t executed; + + /** + * The pc offset for the access. + */ + uint64_t pcOffset; + + /** + * This increments each time when this instruction is executed by number of + * threads that executed this instruction with predicate and condition code + * evaluating to true. + */ + uint64_t threadsExecuted; + + /** + * The total number of 32 bytes transactions to L2 cache generated by this + access + */ + uint64_t l2_transactions; + + /** + * The minimum number of L2 transactions possible based on the access pattern. 
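A common use of the two transaction counters documented here is a per-site coalescing-efficiency ratio; a small sketch:

/* Ratio of the minimum possible to the observed L2 transactions: 1.0 means
   perfectly coalesced accesses, lower values indicate replayed transactions. */
static double GlobalAccessEfficiency(const CUpti_ActivityGlobalAccess3 *g) {
  if (g->l2_transactions == 0)
    return 0.0;
  return (double)g->theoreticalL2Transactions / (double)g->l2_transactions;
}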
+ */ + uint64_t theoreticalL2Transactions; +} CUpti_ActivityGlobalAccess3; + +/** + * \brief The activity record for a source-level branch + * result. (deprecated) + * + * This activity records the locations of the branches in the + * source (CUPTI_ACTIVITY_KIND_BRANCH). + * Branch activities are now reported using the + * CUpti_ActivityBranch2 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_BRANCH. + */ + CUpti_ActivityKind kind; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * The pc offset for the branch. + */ + uint32_t pcOffset; + + /** + * The number of times this instruction was executed per warp. It will be incremented + * regardless of predicate or condition code. + */ + uint32_t executed; + + /** + * Number of times this branch diverged. + */ + uint32_t diverged; + + /** + * This increments, each time the instruction is executed, by the number + * of threads that executed this instruction. + */ + uint64_t threadsExecuted; +} CUpti_ActivityBranch; + +/** + * \brief The activity record for a source-level branch + * result. + * + * This activity records the locations of the branches in the + * source (CUPTI_ACTIVITY_KIND_BRANCH). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_BRANCH. + */ + CUpti_ActivityKind kind; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * Correlation ID with global/device function name + */ + uint32_t functionId; + + /** + * The pc offset for the branch. + */ + uint32_t pcOffset; + + /** + * Number of times this branch diverged. + */ + uint32_t diverged; + + /** + * This increments, each time the instruction is executed, by the number + * of threads that executed this instruction. + */ + uint64_t threadsExecuted; + + /** + * The number of times this instruction was executed per warp. It will be incremented + * regardless of predicate or condition code. + */ + uint32_t executed; + + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +} CUpti_ActivityBranch2; + + +/** + * \brief The activity record for a device. (deprecated) + * + * This activity record represents information about a GPU device + * (CUPTI_ACTIVITY_KIND_DEVICE). + * Device activity is now reported using the + * CUpti_ActivityDevice4 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_DEVICE. + */ + CUpti_ActivityKind kind; + + /** + * The flags associated with the device. \see CUpti_ActivityFlag + */ + CUpti_ActivityFlag flags; + + /** + * The global memory bandwidth available on the device, in + * kBytes/sec. + */ + uint64_t globalMemoryBandwidth; + + /** + * The amount of global memory on the device, in bytes. + */ + uint64_t globalMemorySize; + + /** + * The amount of constant memory on the device, in bytes. + */ + uint32_t constantMemorySize; + + /** + * The size of the L2 cache on the device, in bytes. + */ + uint32_t l2CacheSize; + + /** + * The number of threads per warp on the device. + */ + uint32_t numThreadsPerWarp; + + /** + * The core clock rate of the device, in kHz. + */ + uint32_t coreClockRate; + + /** + * Number of memory copy engines on the device.
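The diverged and executed counters in CUpti_ActivityBranch2 above combine naturally into a per-site divergence rate; a small sketch:

/* Fraction of this branch's warp-level executions that diverged. */
static double BranchDivergenceRate(const CUpti_ActivityBranch2 *b) {
  if (b->executed == 0)
    return 0.0;
  return (double)b->diverged / (double)b->executed;
}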
+ */ + uint32_t numMemcpyEngines; + + /** + * Number of multiprocessors on the device. + */ + uint32_t numMultiprocessors; + + /** + * The maximum "instructions per cycle" possible on each device + * multiprocessor. + */ + uint32_t maxIPC; + + /** + * Maximum number of warps that can be present on a multiprocessor + * at any given time. + */ + uint32_t maxWarpsPerMultiprocessor; + + /** + * Maximum number of blocks that can be present on a multiprocessor + * at any given time. + */ + uint32_t maxBlocksPerMultiprocessor; + + /** + * Maximum number of registers that can be allocated to a block. + */ + uint32_t maxRegistersPerBlock; + + /** + * Maximum amount of shared memory that can be assigned to a block, + * in bytes. + */ + uint32_t maxSharedMemoryPerBlock; + + /** + * Maximum number of threads allowed in a block. + */ + uint32_t maxThreadsPerBlock; + + /** + * Maximum allowed X dimension for a block. + */ + uint32_t maxBlockDimX; + + /** + * Maximum allowed Y dimension for a block. + */ + uint32_t maxBlockDimY; + + /** + * Maximum allowed Z dimension for a block. + */ + uint32_t maxBlockDimZ; + + /** + * Maximum allowed X dimension for a grid. + */ + uint32_t maxGridDimX; + + /** + * Maximum allowed Y dimension for a grid. + */ + uint32_t maxGridDimY; + + /** + * Maximum allowed Z dimension for a grid. + */ + uint32_t maxGridDimZ; + + /** + * Compute capability for the device, major number. + */ + uint32_t computeCapabilityMajor; + + /** + * Compute capability for the device, minor number. + */ + uint32_t computeCapabilityMinor; + + /** + * The device ID. + */ + uint32_t id; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The device name. This name is shared across all activity records + * representing instances of the device, and so should not be + * modified. + */ + const char *name; +} CUpti_ActivityDevice; + +/** + * \brief The activity record for a device. (deprecated) + * + * This activity record represents information about a GPU device + * (CUPTI_ACTIVITY_KIND_DEVICE). + * Device activity is now reported using the + * CUpti_ActivityDevice4 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_DEVICE. + */ + CUpti_ActivityKind kind; + + /** + * The flags associated with the device. \see CUpti_ActivityFlag + */ + CUpti_ActivityFlag flags; + + /** + * The global memory bandwidth available on the device, in + * kBytes/sec. + */ + uint64_t globalMemoryBandwidth; + + /** + * The amount of global memory on the device, in bytes. + */ + uint64_t globalMemorySize; + + /** + * The amount of constant memory on the device, in bytes. + */ + uint32_t constantMemorySize; + + /** + * The size of the L2 cache on the device, in bytes. + */ + uint32_t l2CacheSize; + + /** + * The number of threads per warp on the device. + */ + uint32_t numThreadsPerWarp; + + /** + * The core clock rate of the device, in kHz. + */ + uint32_t coreClockRate; + + /** + * Number of memory copy engines on the device. + */ + uint32_t numMemcpyEngines; + + /** + * Number of multiprocessors on the device. + */ + uint32_t numMultiprocessors; + + /** + * The maximum "instructions per cycle" possible on each device + * multiprocessor. + */ + uint32_t maxIPC; + + /** + * Maximum number of warps that can be present on a multiprocessor + * at any given time. + */ + uint32_t maxWarpsPerMultiprocessor; + + /** + * Maximum number of blocks that can be present on a multiprocessor + * at any given time. 
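+ *
+ * For illustration (an assumption-laden sketch, not official guidance),
+ * this field combines with the ones above to bound per-SM residency;
+ * for example, the maximum number of resident threads per multiprocessor is:
+ * \code
+ * // Upper bound on resident threads on one multiprocessor.
+ * uint32_t maxResidentThreads =
+ *     dev->maxWarpsPerMultiprocessor * dev->numThreadsPerWarp;
+ * \endcode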
+ */ + uint32_t maxBlocksPerMultiprocessor; + + /** + * Maximum amount of shared memory available per multiprocessor, in bytes. + */ + uint32_t maxSharedMemoryPerMultiprocessor; + + /** + * Maximum number of 32-bit registers available per multiprocessor. + */ + uint32_t maxRegistersPerMultiprocessor; + + /** + * Maximum number of registers that can be allocated to a block. + */ + uint32_t maxRegistersPerBlock; + + /** + * Maximum amount of shared memory that can be assigned to a block, + * in bytes. + */ + uint32_t maxSharedMemoryPerBlock; + + /** + * Maximum number of threads allowed in a block. + */ + uint32_t maxThreadsPerBlock; + + /** + * Maximum allowed X dimension for a block. + */ + uint32_t maxBlockDimX; + + /** + * Maximum allowed Y dimension for a block. + */ + uint32_t maxBlockDimY; + + /** + * Maximum allowed Z dimension for a block. + */ + uint32_t maxBlockDimZ; + + /** + * Maximum allowed X dimension for a grid. + */ + uint32_t maxGridDimX; + + /** + * Maximum allowed Y dimension for a grid. + */ + uint32_t maxGridDimY; + + /** + * Maximum allowed Z dimension for a grid. + */ + uint32_t maxGridDimZ; + + /** + * Compute capability for the device, major number. + */ + uint32_t computeCapabilityMajor; + + /** + * Compute capability for the device, minor number. + */ + uint32_t computeCapabilityMinor; + + /** + * The device ID. + */ + uint32_t id; + + /** + * ECC enabled flag for device + */ + uint32_t eccEnabled; + + /** + * The device UUID. This value is the globally unique immutable + * alphanumeric identifier of the device. + */ + CUuuid uuid; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The device name. This name is shared across all activity records + * representing instances of the device, and so should not be + * modified. + */ + const char *name; +} CUpti_ActivityDevice2; + +/** + * \brief The activity record for a device. (CUDA 7.0 onwards) + * + * This activity record represents information about a GPU device + * (CUPTI_ACTIVITY_KIND_DEVICE). + * Device activity is now reported using the + * CUpti_ActivityDevice4 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_DEVICE. + */ + CUpti_ActivityKind kind; + + /** + * The flags associated with the device. \see CUpti_ActivityFlag + */ + CUpti_ActivityFlag flags; + + /** + * The global memory bandwidth available on the device, in + * kBytes/sec. + */ + uint64_t globalMemoryBandwidth; + + /** + * The amount of global memory on the device, in bytes. + */ + uint64_t globalMemorySize; + + /** + * The amount of constant memory on the device, in bytes. + */ + uint32_t constantMemorySize; + + /** + * The size of the L2 cache on the device, in bytes. + */ + uint32_t l2CacheSize; + + /** + * The number of threads per warp on the device. + */ + uint32_t numThreadsPerWarp; + + /** + * The core clock rate of the device, in kHz. + */ + uint32_t coreClockRate; + + /** + * Number of memory copy engines on the device. + */ + uint32_t numMemcpyEngines; + + /** + * Number of multiprocessors on the device. + */ + uint32_t numMultiprocessors; + + /** + * The maximum "instructions per cycle" possible on each device + * multiprocessor. + */ + uint32_t maxIPC; + + /** + * Maximum number of warps that can be present on a multiprocessor + * at any given time. + */ + uint32_t maxWarpsPerMultiprocessor; + + /** + * Maximum number of blocks that can be present on a multiprocessor + * at any given time. 
+ */ + uint32_t maxBlocksPerMultiprocessor; + + /** + * Maximum amount of shared memory available per multiprocessor, in bytes. + */ + uint32_t maxSharedMemoryPerMultiprocessor; + + /** + * Maximum number of 32-bit registers available per multiprocessor. + */ + uint32_t maxRegistersPerMultiprocessor; + + /** + * Maximum number of registers that can be allocated to a block. + */ + uint32_t maxRegistersPerBlock; + + /** + * Maximum amount of shared memory that can be assigned to a block, + * in bytes. + */ + uint32_t maxSharedMemoryPerBlock; + + /** + * Maximum number of threads allowed in a block. + */ + uint32_t maxThreadsPerBlock; + + /** + * Maximum allowed X dimension for a block. + */ + uint32_t maxBlockDimX; + + /** + * Maximum allowed Y dimension for a block. + */ + uint32_t maxBlockDimY; + + /** + * Maximum allowed Z dimension for a block. + */ + uint32_t maxBlockDimZ; + + /** + * Maximum allowed X dimension for a grid. + */ + uint32_t maxGridDimX; + + /** + * Maximum allowed Y dimension for a grid. + */ + uint32_t maxGridDimY; + + /** + * Maximum allowed Z dimension for a grid. + */ + uint32_t maxGridDimZ; + + /** + * Compute capability for the device, major number. + */ + uint32_t computeCapabilityMajor; + + /** + * Compute capability for the device, minor number. + */ + uint32_t computeCapabilityMinor; + + /** + * The device ID. + */ + uint32_t id; + + /** + * ECC enabled flag for device + */ + uint32_t eccEnabled; + + /** + * The device UUID. This value is the globally unique immutable + * alphanumeric identifier of the device. + */ + CUuuid uuid; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The device name. This name is shared across all activity records + * representing instances of the device, and so should not be + * modified. + */ + const char *name; + + /** + * Flag to indicate whether the device is visible to CUDA. Users can + * set the device visibility using CUDA_VISIBLE_DEVICES environment + */ + uint8_t isCudaVisible; + + uint8_t reserved[7]; +} CUpti_ActivityDevice3; + + +/** + * \brief The activity record for a device. (CUDA 11.6 onwards) + * + * This activity record represents information about a GPU device + * (CUPTI_ACTIVITY_KIND_DEVICE). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_DEVICE. + */ + CUpti_ActivityKind kind; + + /** + * The flags associated with the device. \see CUpti_ActivityFlag + */ + CUpti_ActivityFlag flags; + + /** + * The global memory bandwidth available on the device, in + * kBytes/sec. + */ + uint64_t globalMemoryBandwidth; + + /** + * The amount of global memory on the device, in bytes. + */ + uint64_t globalMemorySize; + + /** + * The amount of constant memory on the device, in bytes. + */ + uint32_t constantMemorySize; + + /** + * The size of the L2 cache on the device, in bytes. + */ + uint32_t l2CacheSize; + + /** + * The number of threads per warp on the device. + */ + uint32_t numThreadsPerWarp; + + /** + * The core clock rate of the device, in kHz. + */ + uint32_t coreClockRate; + + /** + * Number of memory copy engines on the device. + */ + uint32_t numMemcpyEngines; + + /** + * Number of multiprocessors on the device. + */ + uint32_t numMultiprocessors; + + /** + * The maximum "instructions per cycle" possible on each device + * multiprocessor. + */ + uint32_t maxIPC; + + /** + * Maximum number of warps that can be present on a multiprocessor + * at any given time. 
+ */ + uint32_t maxWarpsPerMultiprocessor; + + /** + * Maximum number of blocks that can be present on a multiprocessor + * at any given time. + */ + uint32_t maxBlocksPerMultiprocessor; + + /** + * Maximum amount of shared memory available per multiprocessor, in bytes. + */ + uint32_t maxSharedMemoryPerMultiprocessor; + + /** + * Maximum number of 32-bit registers available per multiprocessor. + */ + uint32_t maxRegistersPerMultiprocessor; + + /** + * Maximum number of registers that can be allocated to a block. + */ + uint32_t maxRegistersPerBlock; + + /** + * Maximum amount of shared memory that can be assigned to a block, + * in bytes. + */ + uint32_t maxSharedMemoryPerBlock; + + /** + * Maximum number of threads allowed in a block. + */ + uint32_t maxThreadsPerBlock; + + /** + * Maximum allowed X dimension for a block. + */ + uint32_t maxBlockDimX; + + /** + * Maximum allowed Y dimension for a block. + */ + uint32_t maxBlockDimY; + + /** + * Maximum allowed Z dimension for a block. + */ + uint32_t maxBlockDimZ; + + /** + * Maximum allowed X dimension for a grid. + */ + uint32_t maxGridDimX; + + /** + * Maximum allowed Y dimension for a grid. + */ + uint32_t maxGridDimY; + + /** + * Maximum allowed Z dimension for a grid. + */ + uint32_t maxGridDimZ; + + /** + * Compute capability for the device, major number. + */ + uint32_t computeCapabilityMajor; + + /** + * Compute capability for the device, minor number. + */ + uint32_t computeCapabilityMinor; + + /** + * The device ID. + */ + uint32_t id; + + /** + * ECC enabled flag for device + */ + uint32_t eccEnabled; + + /** + * The device UUID. This value is the globally unique immutable + * alphanumeric identifier of the device. + */ + CUuuid uuid; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The device name. This name is shared across all activity records + * representing instances of the device, and so should not be + * modified. + */ + const char *name; + + /** + * Flag to indicate whether the device is visible to CUDA. Users can + * set the device visibility using CUDA_VISIBLE_DEVICES environment + */ + uint8_t isCudaVisible; + + /** + * MIG enabled flag for device + */ + uint8_t isMigEnabled; + + uint8_t reserved[6]; + + /** + * GPU Instance id for MIG enabled devices. + * If mig mode is disabled value is set to UINT32_MAX + */ + uint32_t gpuInstanceId; + + /** + * Compute Instance id for MIG enabled devices. + * If mig mode is disabled value is set to UINT32_MAX + */ + uint32_t computeInstanceId; + + /** + * The MIG UUID. This value is the globally unique immutable + * alphanumeric identifier of the device. + */ + CUuuid migUuid; + +} CUpti_ActivityDevice4; + +/** + * \brief The activity record for a device attribute. + * + * This activity record represents information about a GPU device: + * either a CUpti_DeviceAttribute or CUdevice_attribute value + * (CUPTI_ACTIVITY_KIND_DEVICE_ATTRIBUTE). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be + * CUPTI_ACTIVITY_KIND_DEVICE_ATTRIBUTE. + */ + CUpti_ActivityKind kind; + + /** + * The flags associated with the device. \see CUpti_ActivityFlag + */ + CUpti_ActivityFlag flags; + + /** + * The ID of the device that this attribute applies to. + */ + uint32_t deviceId; + + /** + * The attribute, either a CUpti_DeviceAttribute or + * CUdevice_attribute. Flag + * CUPTI_ACTIVITY_FLAG_DEVICE_ATTRIBUTE_CUDEVICE is used to indicate + * what kind of attribute this is. 
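+ * The selection rule is illustrated by this hedged consumer sketch
+ * (the handler names are invented for illustration):
+ * \code
+ * if (rec->flags & CUPTI_ACTIVITY_FLAG_DEVICE_ATTRIBUTE_CUDEVICE)
+ *   handleCuAttribute(rec->attribute.cu, &rec->value);       // hypothetical
+ * else
+ *   handleCuptiAttribute(rec->attribute.cupti, &rec->value); // hypothetical
+ * \endcode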
If + * CUPTI_ACTIVITY_FLAG_DEVICE_ATTRIBUTE_CUDEVICE is 1 then + * CUdevice_attribute field is value, otherwise + * CUpti_DeviceAttribute field is valid. + */ + union { + CUdevice_attribute cu; + CUpti_DeviceAttribute cupti; + } attribute; + + /** + * The value for the attribute. See CUpti_DeviceAttribute and + * CUdevice_attribute for the type of the value for a given + * attribute. + */ + union { + double vDouble; + uint32_t vUint32; + uint64_t vUint64; + int32_t vInt32; + int64_t vInt64; + } value; +} CUpti_ActivityDeviceAttribute; + +/** + * \brief The activity record for a context. + * + * This activity record represents information about a context + * (CUPTI_ACTIVITY_KIND_CONTEXT). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_CONTEXT. + */ + CUpti_ActivityKind kind; + + /** + * The context ID. + */ + uint32_t contextId; + + /** + * The device ID. + */ + uint32_t deviceId; + + /** + * The compute API kind. \see CUpti_ActivityComputeApiKind + */ + uint16_t computeApiKind; + + /** + * The ID for the NULL stream in this context + */ + uint16_t nullStreamId; +} CUpti_ActivityContext; + +/** + * \brief The activity record providing a name. + * + * This activity record provides a name for a device, context, thread, + * etc. and other resource naming done via NVTX APIs + * (CUPTI_ACTIVITY_KIND_NAME). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_NAME. + */ + CUpti_ActivityKind kind; + + /** + * The kind of activity object being named. + */ + CUpti_ActivityObjectKind objectKind; + + /** + * The identifier for the activity object. 'objectKind' indicates + * which ID is valid for this record. + */ + CUpti_ActivityObjectKindId objectId; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The name. + */ + const char *name; + +} CUpti_ActivityName; + +/** + * \brief The activity record providing a marker which is an + * instantaneous point in time. (deprecated in CUDA 8.0) + * + * The marker is specified with a descriptive name and unique id + * (CUPTI_ACTIVITY_KIND_MARKER). + * Marker activity is now reported using the + * CUpti_ActivityMarker2 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_MARKER. + */ + CUpti_ActivityKind kind; + + /** + * The flags associated with the marker. \see CUpti_ActivityFlag + */ + CUpti_ActivityFlag flags; + + /** + * The timestamp for the marker, in ns. A value of 0 indicates that + * timestamp information could not be collected for the marker. + */ + uint64_t timestamp; + + /** + * The marker ID. + */ + uint32_t id; + + /** + * The kind of activity object associated with this marker. + */ + CUpti_ActivityObjectKind objectKind; + + /** + * The identifier for the activity object associated with this + * marker. 'objectKind' indicates which ID is valid for this record. + */ + CUpti_ActivityObjectKindId objectId; + +#ifdef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad; +#endif + + /** + * The marker name for an instantaneous or start marker. This will + * be NULL for an end marker. + */ + const char *name; + +} CUpti_ActivityMarker; + +/** + * \brief The activity record providing a marker which is an + * instantaneous point in time. + * + * The marker is specified with a descriptive name and unique id + * (CUPTI_ACTIVITY_KIND_MARKER). 
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_MARKER.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The flags associated with the marker. \see CUpti_ActivityFlag
+ */
+ CUpti_ActivityFlag flags;
+
+ /**
+ * The timestamp for the marker, in ns. A value of 0 indicates that
+ * timestamp information could not be collected for the marker.
+ */
+ uint64_t timestamp;
+
+ /**
+ * The marker ID.
+ */
+ uint32_t id;
+
+ /**
+ * The kind of activity object associated with this marker.
+ */
+ CUpti_ActivityObjectKind objectKind;
+
+ /**
+ * The identifier for the activity object associated with this
+ * marker. 'objectKind' indicates which ID is valid for this record.
+ */
+ CUpti_ActivityObjectKindId objectId;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+
+ /**
+ * The marker name for an instantaneous or start marker. This will
+ * be NULL for an end marker.
+ */
+ const char *name;
+
+ /**
+ * The name of the domain to which this marker belongs.
+ * This will be NULL for the default domain.
+ */
+ const char *domain;
+
+} CUpti_ActivityMarker2;
+
+/**
+ * \brief The activity record providing detailed information for a marker.
+ *
+ * The marker data contains color, payload, and category.
+ * (CUPTI_ACTIVITY_KIND_MARKER_DATA).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be
+ * CUPTI_ACTIVITY_KIND_MARKER_DATA.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The flags associated with the marker. \see CUpti_ActivityFlag
+ */
+ CUpti_ActivityFlag flags;
+
+ /**
+ * The marker ID.
+ */
+ uint32_t id;
+
+ /**
+ * Defines the payload format for the value associated with the marker.
+ */
+ CUpti_MetricValueKind payloadKind;
+
+ /**
+ * The payload value.
+ */
+ CUpti_MetricValue payload;
+
+ /**
+ * The color for the marker.
+ */
+ uint32_t color;
+
+ /**
+ * The category for the marker.
+ */
+ uint32_t category;
+
+} CUpti_ActivityMarkerData;
+
+/**
+ * \brief The activity record for CUPTI and driver overheads.
+ *
+ * This activity record provides CUPTI and driver overhead information
+ * (CUPTI_ACTIVITY_OVERHEAD).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_OVERHEAD.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The kind of overhead: CUPTI, DRIVER, COMPILER, etc.
+ */
+ CUpti_ActivityOverheadKind overheadKind;
+
+ /**
+ * The kind of activity object that the overhead is associated with.
+ */
+ CUpti_ActivityObjectKind objectKind;
+
+ /**
+ * The identifier for the activity object. 'objectKind' indicates
+ * which ID is valid for this record.
+ */
+ CUpti_ActivityObjectKindId objectId;
+
+ /**
+ * The start timestamp for the overhead, in ns. A value of 0 for
+ * both the start and end timestamps indicates that timestamp
+ * information could not be collected for the overhead.
+ */
+ uint64_t start;
+
+ /**
+ * The end timestamp for the overhead, in ns. A value of 0 for both
+ * the start and end timestamps indicates that timestamp information
+ * could not be collected for the overhead.
+ */
+ uint64_t end;
+} CUpti_ActivityOverhead;
+
+/**
+ * \brief The activity record for CUPTI environmental data.
+ *
+ * This activity record provides CUPTI environmental data, including
+ * power, clocks, and thermals. This information is sampled at
+ * various rates and returned in this activity record. The consumer
+ * of the record needs to check the environmentKind field to figure
+ * out what kind of environmental record this is.
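+ *
+ * A hedged consumer sketch (the variable name is an assumption):
+ * \code
+ * switch (env->environmentKind) {
+ * case CUPTI_ACTIVITY_ENVIRONMENT_TEMPERATURE:
+ *   printf("GPU temperature: %u C\n", env->data.temperature.gpuTemperature);
+ *   break;
+ * case CUPTI_ACTIVITY_ENVIRONMENT_POWER:
+ *   printf("Power: %u mW (limit %u mW)\n",
+ *          env->data.power.power, env->data.power.powerLimit);
+ *   break;
+ * default:
+ *   break;
+ * }
+ * \endcode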
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_ENVIRONMENT.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The ID of the device
+ */
+ uint32_t deviceId;
+
+ /**
+ * The timestamp when this sample was retrieved, in ns. A value of 0
+ * indicates that timestamp information could not be collected for
+ * the marker.
+ */
+ uint64_t timestamp;
+
+ /**
+ * The kind of data reported in this record.
+ */
+ CUpti_ActivityEnvironmentKind environmentKind;
+
+ union {
+ /**
+ * Data returned for CUPTI_ACTIVITY_ENVIRONMENT_SPEED environment
+ * kind.
+ */
+ struct {
+ /**
+ * The SM frequency in MHz
+ */
+ uint32_t smClock;
+
+ /**
+ * The memory frequency in MHz
+ */
+ uint32_t memoryClock;
+
+ /**
+ * The PCIe link generation.
+ */
+ uint32_t pcieLinkGen;
+
+ /**
+ * The PCIe link width.
+ */
+ uint32_t pcieLinkWidth;
+
+ /**
+ * The clocks throttle reasons.
+ */
+ CUpti_EnvironmentClocksThrottleReason clocksThrottleReasons;
+ } speed;
+
+ /**
+ * Data returned for CUPTI_ACTIVITY_ENVIRONMENT_TEMPERATURE
+ * environment kind.
+ */
+ struct {
+ /**
+ * The GPU temperature in degrees C.
+ */
+ uint32_t gpuTemperature;
+ } temperature;
+
+ /**
+ * Data returned for CUPTI_ACTIVITY_ENVIRONMENT_POWER environment
+ * kind.
+ */
+ struct {
+ /**
+ * The power in milliwatts consumed by GPU and associated
+ * circuitry.
+ */
+ uint32_t power;
+
+ /**
+ * The power in milliwatts that will trigger the power management
+ * algorithm.
+ */
+ uint32_t powerLimit;
+ } power;
+
+ /**
+ * Data returned for CUPTI_ACTIVITY_ENVIRONMENT_COOLING
+ * environment kind.
+ */
+ struct {
+ /**
+ * The fan speed as percentage of maximum.
+ */
+ uint32_t fanSpeed;
+ } cooling;
+ } data;
+} CUpti_ActivityEnvironment;
+
+/**
+ * \brief The activity record for source-level instruction execution.
+ *
+ * This activity records results for source-level instruction execution.
+ * (CUPTI_ACTIVITY_KIND_INSTRUCTION_EXECUTION).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_INSTRUCTION_EXECUTION.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The properties of this instruction execution.
+ */
+ CUpti_ActivityFlag flags;
+
+ /**
+ * The ID for source locator.
+ */
+ uint32_t sourceLocatorId;
+
+ /**
+ * The correlation ID of the kernel to which this result is associated.
+ */
+ uint32_t correlationId;
+
+ /**
+ * Correlation ID with global/device function name
+ */
+ uint32_t functionId;
+
+ /**
+ * The pc offset for the instruction.
+ */
+ uint32_t pcOffset;
+
+ /**
+ * This increments each time this instruction is executed, by the number
+ * of threads that executed this instruction, regardless of predicate or condition code.
+ */
+ uint64_t threadsExecuted;
+
+ /**
+ * This increments each time this instruction is executed, by the number
+ * of threads that executed this instruction with predicate and condition code evaluating to true.
+ */
+ uint64_t notPredOffThreadsExecuted;
+
+ /**
+ * The number of times this instruction was executed per warp. It will be incremented
+ * regardless of predicate or condition code.
+ */
+ uint32_t executed;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+} CUpti_ActivityInstructionExecution;
+
+/**
+ * \brief The activity record for PC sampling. (deprecated in CUDA 8.0)
+ *
+ * This activity records information obtained by sampling PC
+ * (CUPTI_ACTIVITY_KIND_PC_SAMPLING).
+ * PC sampling activities are now reported using the
+ * CUpti_ActivityPCSampling2 activity record.
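+ *
+ * Regardless of the record version, a profiler typically aggregates
+ * samples per program counter and stall reason; a hedged sketch with
+ * an invented accumulator:
+ * \code
+ * // key: (functionId, pcOffset); histogramAdd is hypothetical.
+ * histogramAdd(hist, rec->functionId, rec->pcOffset,
+ *              rec->stallReason, rec->samples);
+ * \endcode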
+ */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_PC_SAMPLING. + */ + CUpti_ActivityKind kind; + + /** + * The properties of this instruction. + */ + CUpti_ActivityFlag flags; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * Correlation ID with global/device function name + */ + uint32_t functionId; + + /** + * The pc offset for the instruction. + */ + uint32_t pcOffset; + + /** + * Number of times the PC was sampled with the stallReason in the record. + * The same PC can be sampled with different stall reasons. + */ + uint32_t samples; + + /** + * Current stall reason. Includes one of the reasons from + * \ref CUpti_ActivityPCSamplingStallReason + */ + CUpti_ActivityPCSamplingStallReason stallReason; +} CUpti_ActivityPCSampling; + +/** + * \brief The activity record for PC sampling. (deprecated in CUDA 9.0) + * + * This activity records information obtained by sampling PC + * (CUPTI_ACTIVITY_KIND_PC_SAMPLING). + * PC sampling activities are now reported using the + * CUpti_ActivityPCSampling3 activity record. + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_PC_SAMPLING. + */ + CUpti_ActivityKind kind; + + /** + * The properties of this instruction. + */ + CUpti_ActivityFlag flags; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * Correlation ID with global/device function name + */ + uint32_t functionId; + + /** + * The pc offset for the instruction. + */ + uint32_t pcOffset; + + /** + * Number of times the PC was sampled with the stallReason in the record. + * These samples indicate that no instruction was issued in that cycle from + * the warp scheduler from where the warp was sampled. + * Field is valid for devices with compute capability 6.0 and higher + */ + uint32_t latencySamples; + + /** + * Number of times the PC was sampled with the stallReason in the record. + * The same PC can be sampled with different stall reasons. The count includes + * latencySamples. + */ + uint32_t samples; + + /** + * Current stall reason. Includes one of the reasons from + * \ref CUpti_ActivityPCSamplingStallReason + */ + CUpti_ActivityPCSamplingStallReason stallReason; + + uint32_t pad; +} CUpti_ActivityPCSampling2; + +/** + * \brief The activity record for PC sampling. + * + * This activity records information obtained by sampling PC + * (CUPTI_ACTIVITY_KIND_PC_SAMPLING). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_PC_SAMPLING. + */ + CUpti_ActivityKind kind; + + /** + * The properties of this instruction. + */ + CUpti_ActivityFlag flags; + + /** + * The ID for source locator. + */ + uint32_t sourceLocatorId; + + /** + * The correlation ID of the kernel to which this result is associated. + */ + uint32_t correlationId; + + /** + * Correlation ID with global/device function name + */ + uint32_t functionId; + + /** + * Number of times the PC was sampled with the stallReason in the record. + * These samples indicate that no instruction was issued in that cycle from + * the warp scheduler from where the warp was sampled. 
+ * Field is valid for devices with compute capability 6.0 and higher
+ */
+ uint32_t latencySamples;
+
+ /**
+ * Number of times the PC was sampled with the stallReason in the record.
+ * The same PC can be sampled with different stall reasons. The count includes
+ * latencySamples.
+ */
+ uint32_t samples;
+
+ /**
+ * Current stall reason. Includes one of the reasons from
+ * \ref CUpti_ActivityPCSamplingStallReason
+ */
+ CUpti_ActivityPCSamplingStallReason stallReason;
+
+ /**
+ * The pc offset for the instruction.
+ */
+ uint64_t pcOffset;
+} CUpti_ActivityPCSampling3;
+
+/**
+ * \brief The activity record for record status for PC sampling.
+ *
+ * This activity records information obtained by sampling PC
+ * (CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_PC_SAMPLING_RECORD_INFO.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The correlation ID of the kernel to which this result is associated.
+ */
+ uint32_t correlationId;
+
+ /**
+ * Number of times the PC was sampled for this kernel instance including all
+ * dropped samples.
+ */
+ uint64_t totalSamples;
+
+ /**
+ * Number of samples that were dropped by hardware due to backpressure/overflow.
+ */
+ uint64_t droppedSamples;
+
+ /**
+ * Sampling period, in cycles.
+ */
+ uint64_t samplingPeriodInCycles;
+} CUpti_ActivityPCSamplingRecordInfo;
+
+/**
+ * \brief The activity record for Unified Memory counters (deprecated in CUDA 7.0)
+ *
+ * This activity record represents a Unified Memory counter
+ * (CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The Unified Memory counter kind. See \ref CUpti_ActivityUnifiedMemoryCounterKind
+ */
+ CUpti_ActivityUnifiedMemoryCounterKind counterKind;
+
+ /**
+ * Scope of the Unified Memory counter. See \ref CUpti_ActivityUnifiedMemoryCounterScope
+ */
+ CUpti_ActivityUnifiedMemoryCounterScope scope;
+
+ /**
+ * The ID of the device involved in the memory transfer operation.
+ * It is not relevant if the scope of the counter is global (all devices).
+ */
+ uint32_t deviceId;
+
+ /**
+ * Value of the counter
+ */
+ uint64_t value;
+
+ /**
+ * The timestamp when this sample was retrieved, in ns. A value of 0
+ * indicates that timestamp information could not be collected
+ */
+ uint64_t timestamp;
+
+ /**
+ * The ID of the process to which this record belongs. In the case of
+ * global scope, processId is undefined.
+ */
+ uint32_t processId;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+} CUpti_ActivityUnifiedMemoryCounter;
+
+/**
+ * \brief The activity record for Unified Memory counters (CUDA 7.0 and beyond)
+ *
+ * This activity record represents a Unified Memory counter
+ * (CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER).
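+ *
+ * As an illustrative sketch (assumed usage, not official documentation):
+ * because start/end are in ns and value is in bytes for the transfer
+ * kinds, bytes divided by elapsed ns numerically equals GB/s:
+ * \code
+ * if (uvm->counterKind ==
+ *       CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD &&
+ *     uvm->end > uvm->start) {
+ *   double gbPerSec = (double)uvm->value / (double)(uvm->end - uvm->start);
+ *   reportMigrationBandwidth(uvm->srcId, uvm->dstId, gbPerSec); // hypothetical
+ * }
+ * \endcode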
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The Unified Memory counter kind
+ */
+ CUpti_ActivityUnifiedMemoryCounterKind counterKind;
+
+ /**
+ * Value of the counter
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD,
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH,
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING and
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP, it is the size of the
+ * memory region in bytes.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT, it
+ * is the number of page fault groups for the same page.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT,
+ * it is the program counter for the instruction that caused the fault.
+ */
+ uint64_t value;
+
+ /**
+ * The start timestamp of the counter, in ns.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD and
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH, the timestamp is
+ * captured when the activity starts on the GPU.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT and
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT, the timestamp is
+ * captured when the CUDA driver started processing the fault.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING, the timestamp
+ * is captured when the CUDA driver detected thrashing of the memory region.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING, the
+ * timestamp is captured when the throttling operation was started by the CUDA driver.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP, the
+ * timestamp is captured when the CUDA driver has pushed all required operations
+ * to the processor specified by dstId.
+ */
+ uint64_t start;
+
+ /**
+ * The end timestamp of the counter, in ns.
+ * Ignore this field if counterKind is
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD and
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH, the timestamp is
+ * captured when the activity finishes on the GPU.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT, the timestamp is
+ * captured when the CUDA driver queues the replay of faulting memory accesses on the GPU.
+ * For counterKind CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING, the timestamp
+ * is captured when the throttling operation was finished by the CUDA driver.
+ */
+ uint64_t end;
+
+ /**
+ * This is the virtual base address of the page(s) being transferred. For CPU and
+ * GPU faults, it is the virtual address of the page that faulted.
+ */
+ uint64_t address;
+
+ /**
+ * The ID of the source CPU/device involved in the memory transfer, page fault, thrashing,
+ * throttling or remote map operation. For counterKind
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING, it is a bitwise ORing of the
+ * IDs of the devices contending for the memory region. Ignore this field if counterKind is
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT
+ */
+ uint32_t srcId;
+
+ /**
+ * The ID of the destination CPU/device involved in the memory transfer or remote map
+ * operation.
+ * Ignore this field if counterKind is
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING
+ */
+ uint32_t dstId;
+
+ /**
+ * The ID of the stream causing the transfer.
+ * The value of this field is invalid.
+ */
+ uint32_t streamId;
+
+ /**
+ * The ID of the process to which this record belongs.
+ */
+ uint32_t processId;
+
+ /**
+ * The flags associated with this record. See enums \ref CUpti_ActivityUnifiedMemoryAccessType
+ * if counterKind is CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT
+ * and \ref CUpti_ActivityUnifiedMemoryMigrationCause if counterKind is
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH
+ * and \ref CUpti_ActivityUnifiedMemoryRemoteMapCause if counterKind is
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP and \ref CUpti_ActivityFlag
+ * if counterKind is CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING or
+ * CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING
+ */
+ uint32_t flags;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+} CUpti_ActivityUnifiedMemoryCounter2;
+
+/**
+ * \brief The activity record for global/device functions.
+ *
+ * This activity records function name and corresponding module
+ * information.
+ * (CUPTI_ACTIVITY_KIND_FUNCTION).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_FUNCTION.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * ID to uniquely identify the record
+ */
+ uint32_t id;
+
+ /**
+ * The ID of the context where the function is launched.
+ */
+ uint32_t contextId;
+
+ /**
+ * The module ID in which this global/device function is present.
+ */
+ uint32_t moduleId;
+
+ /**
+ * The function's unique symbol index in the module.
+ */
+ uint32_t functionIndex;
+
+#ifdef CUPTILP64
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+#endif
+
+ /**
+ * The name of the function. This name is shared across all activity
+ * records representing the same kernel, and so should not be
+ * modified.
+ */
+ const char *name;
+} CUpti_ActivityFunction;
+
+/**
+ * \brief The activity record for a CUDA module.
+ *
+ * This activity record represents a CUDA module
+ * (CUPTI_ACTIVITY_KIND_MODULE). This activity record kind is not
+ * produced by the activity API but is included for completeness and
+ * ease-of-use. Profile frameworks built on top of CUPTI that collect
+ * module data from the module callback may choose to use this type to
+ * store the collected module data.
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_MODULE.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The ID of the context where the module is loaded.
+ */
+ uint32_t contextId;
+
+ /**
+ * The module ID.
+ */
+ uint32_t id;
+
+ /**
+ * The cubin size.
+ */
+ uint32_t cubinSize;
+
+#ifndef CUPTILP64
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+#endif
+
+ /**
+ * The pointer to cubin.
+ */
+ const void *cubin;
+} CUpti_ActivityModule;
+
+/**
+ * \brief The activity record for source-level shared
+ * access.
+ *
+ * This activity records the locations of the shared
+ * accesses in the source
+ * (CUPTI_ACTIVITY_KIND_SHARED_ACCESS).
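+ *
+ * An illustrative sketch (the helper name is invented): the ratio of
+ * theoretical to actual transactions approximates shared-memory access
+ * efficiency, with bank conflicts pushing it below 1.0:
+ * \code
+ * static double sharedAccessEfficiency(const CUpti_ActivitySharedAccess *rec)
+ * {
+ *   if (rec->sharedTransactions == 0)
+ *     return 1.0;
+ *   return (double)rec->theoreticalSharedTransactions /
+ *          (double)rec->sharedTransactions;
+ * }
+ * \endcode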
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_SHARED_ACCESS.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The properties of this shared access.
+ */
+ CUpti_ActivityFlag flags;
+
+ /**
+ * The ID for source locator.
+ */
+ uint32_t sourceLocatorId;
+
+ /**
+ * The correlation ID of the kernel to which this result is associated.
+ */
+ uint32_t correlationId;
+
+ /**
+ * Correlation ID with global/device function name
+ */
+ uint32_t functionId;
+
+ /**
+ * The pc offset for the access.
+ */
+ uint32_t pcOffset;
+
+ /**
+ * This increments each time this instruction is executed, by the number
+ * of threads that executed this instruction with predicate and condition code evaluating to true.
+ */
+ uint64_t threadsExecuted;
+
+ /**
+ * The total number of shared memory transactions generated by this access
+ */
+ uint64_t sharedTransactions;
+
+ /**
+ * The minimum number of shared memory transactions possible based on the access pattern.
+ */
+ uint64_t theoreticalSharedTransactions;
+
+ /**
+ * The number of times this instruction was executed per warp. It will be incremented
+ * when at least one thread in the warp is active with predicate and condition code
+ * evaluating to true.
+ */
+ uint32_t executed;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+} CUpti_ActivitySharedAccess;
+
+/**
+ * \brief The activity record for CUDA event.
+ *
+ * This activity is used to track recorded events.
+ * (CUPTI_ACTIVITY_KIND_CUDA_EVENT).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_CUDA_EVENT.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The correlation ID of the API to which this result is associated.
+ */
+ uint32_t correlationId;
+
+ /**
+ * The ID of the context where the event was recorded.
+ */
+ uint32_t contextId;
+
+ /**
+ * The compute stream where the event was recorded.
+ */
+ uint32_t streamId;
+
+ /**
+ * A unique event ID to identify the event record.
+ */
+ uint32_t eventId;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+} CUpti_ActivityCudaEvent;
+
+/**
+ * \brief The activity record for CUDA stream.
+ *
+ * This activity is used to track created streams.
+ * (CUPTI_ACTIVITY_KIND_STREAM).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_STREAM.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The ID of the context where the stream was created.
+ */
+ uint32_t contextId;
+
+ /**
+ * A unique stream ID to identify the stream.
+ */
+ uint32_t streamId;
+
+ /**
+ * The clamped priority for the stream.
+ */
+ uint32_t priority;
+
+ /**
+ * Flags associated with the stream.
+ */
+ CUpti_ActivityStreamFlag flag;
+
+ /**
+ * The correlation ID of the API to which this result is associated.
+ */
+ uint32_t correlationId;
+} CUpti_ActivityStream;
+
+/**
+ * \brief The activity record for synchronization management.
+ *
+ * This activity is used to track various CUDA synchronization APIs.
+ * (CUPTI_ACTIVITY_KIND_SYNCHRONIZATION).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_SYNCHRONIZATION.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The type of record.
+ */
+ CUpti_ActivitySynchronizationType type;
+
+ /**
+ * The start timestamp for the function, in ns. A value of 0 for
+ * both the start and end timestamps indicates that timestamp
+ * information could not be collected for the function.
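+ * As an illustrative aside (not from the original header), end - start
+ * approximates how long the synchronization call was outstanding:
+ * \code
+ * uint64_t blockedNs = sync->end - sync->start; // assumes both are nonzero
+ * \endcode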
+ */
+ uint64_t start;
+
+ /**
+ * The end timestamp for the function, in ns. A value of 0 for both
+ * the start and end timestamps indicates that timestamp information
+ * could not be collected for the function.
+ */
+ uint64_t end;
+
+ /**
+ * The correlation ID of the API to which this result is associated.
+ */
+ uint32_t correlationId;
+
+ /**
+ * The ID of the context for which the synchronization API is called.
+ * In the case of a context synchronization API it is the context ID for which the API is called.
+ * In the case of stream/event synchronization it is the ID of the context where the stream/event was created.
+ */
+ uint32_t contextId;
+
+ /**
+ * The compute stream for which the synchronization API is called.
+ * A CUPTI_SYNCHRONIZATION_INVALID_VALUE value indicates that the field is not applicable for this record.
+ * Not valid for cuCtxSynchronize, cuEventSynchronize.
+ */
+ uint32_t streamId;
+
+ /**
+ * The event ID for which the synchronization API is called.
+ * A CUPTI_SYNCHRONIZATION_INVALID_VALUE value indicates that the field is not applicable for this record.
+ * Not valid for cuCtxSynchronize, cuStreamSynchronize.
+ */
+ uint32_t cudaEventId;
+} CUpti_ActivitySynchronization;
+
+
+/**
+ * \brief The activity record for source-level SASS/source
+ * line-by-line correlation.
+ *
+ * This activity records source-level SASS/source correlation
+ * information.
+ * (CUPTI_ACTIVITY_KIND_INSTRUCTION_CORRELATION).
+ */
+typedef struct PACKED_ALIGNMENT {
+ /**
+ * The activity record kind, must be CUPTI_ACTIVITY_KIND_INSTRUCTION_CORRELATION.
+ */
+ CUpti_ActivityKind kind;
+
+ /**
+ * The properties of this instruction.
+ */
+ CUpti_ActivityFlag flags;
+
+ /**
+ * The ID for source locator.
+ */
+ uint32_t sourceLocatorId;
+
+ /**
+ * Correlation ID with global/device function name
+ */
+ uint32_t functionId;
+
+ /**
+ * The pc offset for the instruction.
+ */
+ uint32_t pcOffset;
+
+ /**
+ * Undefined. Reserved for internal use.
+ */
+ uint32_t pad;
+} CUpti_ActivityInstructionCorrelation;
+
+/**
+ * \brief The OpenAcc event kind for OpenAcc activity records.
+ *
+ * \see CUpti_ActivityKindOpenAcc
+ */
+typedef enum {
+ CUPTI_OPENACC_EVENT_KIND_INVALID = 0,
+ CUPTI_OPENACC_EVENT_KIND_DEVICE_INIT = 1,
+ CUPTI_OPENACC_EVENT_KIND_DEVICE_SHUTDOWN = 2,
+ CUPTI_OPENACC_EVENT_KIND_RUNTIME_SHUTDOWN = 3,
+ CUPTI_OPENACC_EVENT_KIND_ENQUEUE_LAUNCH = 4,
+ CUPTI_OPENACC_EVENT_KIND_ENQUEUE_UPLOAD = 5,
+ CUPTI_OPENACC_EVENT_KIND_ENQUEUE_DOWNLOAD = 6,
+ CUPTI_OPENACC_EVENT_KIND_WAIT = 7,
+ CUPTI_OPENACC_EVENT_KIND_IMPLICIT_WAIT = 8,
+ CUPTI_OPENACC_EVENT_KIND_COMPUTE_CONSTRUCT = 9,
+ CUPTI_OPENACC_EVENT_KIND_UPDATE = 10,
+ CUPTI_OPENACC_EVENT_KIND_ENTER_DATA = 11,
+ CUPTI_OPENACC_EVENT_KIND_EXIT_DATA = 12,
+ CUPTI_OPENACC_EVENT_KIND_CREATE = 13,
+ CUPTI_OPENACC_EVENT_KIND_DELETE = 14,
+ CUPTI_OPENACC_EVENT_KIND_ALLOC = 15,
+ CUPTI_OPENACC_EVENT_KIND_FREE = 16,
+ CUPTI_OPENACC_EVENT_KIND_FORCE_INT = 0x7fffffff
+} CUpti_OpenAccEventKind;
+
+/**
+ * \brief The OpenAcc parent construct kind for OpenAcc activity records.
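+ *
+ * A hedged helper sketch (not part of the header) for logging a few of
+ * the construct kinds defined below:
+ * \code
+ * static const char *constructKindStr(CUpti_OpenAccConstructKind k)
+ * {
+ *   switch (k) {
+ *   case CUPTI_OPENACC_CONSTRUCT_KIND_PARALLEL: return "parallel";
+ *   case CUPTI_OPENACC_CONSTRUCT_KIND_KERNELS:  return "kernels";
+ *   case CUPTI_OPENACC_CONSTRUCT_KIND_LOOP:     return "loop";
+ *   default:                                    return "unknown";
+ *   }
+ * }
+ * \endcode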
+ */ +typedef enum { + CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN = 0, + CUPTI_OPENACC_CONSTRUCT_KIND_PARALLEL = 1, + CUPTI_OPENACC_CONSTRUCT_KIND_KERNELS = 2, + CUPTI_OPENACC_CONSTRUCT_KIND_LOOP = 3, + CUPTI_OPENACC_CONSTRUCT_KIND_DATA = 4, + CUPTI_OPENACC_CONSTRUCT_KIND_ENTER_DATA = 5, + CUPTI_OPENACC_CONSTRUCT_KIND_EXIT_DATA = 6, + CUPTI_OPENACC_CONSTRUCT_KIND_HOST_DATA = 7, + CUPTI_OPENACC_CONSTRUCT_KIND_ATOMIC = 8, + CUPTI_OPENACC_CONSTRUCT_KIND_DECLARE = 9, + CUPTI_OPENACC_CONSTRUCT_KIND_INIT = 10, + CUPTI_OPENACC_CONSTRUCT_KIND_SHUTDOWN = 11, + CUPTI_OPENACC_CONSTRUCT_KIND_SET = 12, + CUPTI_OPENACC_CONSTRUCT_KIND_UPDATE = 13, + CUPTI_OPENACC_CONSTRUCT_KIND_ROUTINE = 14, + CUPTI_OPENACC_CONSTRUCT_KIND_WAIT = 15, + CUPTI_OPENACC_CONSTRUCT_KIND_RUNTIME_API = 16, + CUPTI_OPENACC_CONSTRUCT_KIND_FORCE_INT = 0x7fffffff + +} CUpti_OpenAccConstructKind; + +typedef enum { + CUPTI_OPENMP_EVENT_KIND_INVALID = 0, + CUPTI_OPENMP_EVENT_KIND_PARALLEL = 1, + CUPTI_OPENMP_EVENT_KIND_TASK = 2, + CUPTI_OPENMP_EVENT_KIND_THREAD = 3, + CUPTI_OPENMP_EVENT_KIND_IDLE = 4, + CUPTI_OPENMP_EVENT_KIND_WAIT_BARRIER = 5, + CUPTI_OPENMP_EVENT_KIND_WAIT_TASKWAIT = 6, + CUPTI_OPENMP_EVENT_KIND_FORCE_INT = 0x7fffffff +} CUpti_OpenMpEventKind; + +/** + * \brief The base activity record for OpenAcc records. + * + * The OpenACC activity API part uses a CUpti_ActivityOpenAcc as a generic + * representation for any OpenACC activity. The 'kind' field is used to determine the + * specific activity kind, and from that the CUpti_ActivityOpenAcc object can + * be cast to the specific OpenACC activity record type appropriate for that kind. + * + * Note that all OpenACC activity record types are padded and aligned to + * ensure that each member of the record is naturally aligned. + * + * \see CUpti_ActivityKind + */ +typedef struct PACKED_ALIGNMENT { + /** + * The kind of this activity. + */ + CUpti_ActivityKind kind; + + /** + * CUPTI OpenACC event kind (\see CUpti_OpenAccEventKind) + */ + CUpti_OpenAccEventKind eventKind; + + /** + * CUPTI OpenACC parent construct kind (\see CUpti_OpenAccConstructKind) + * + * Note that for applications using PGI OpenACC runtime < 16.1, this + * will always be CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN. + */ + CUpti_OpenAccConstructKind parentConstruct; + + /** + * Version number + */ + uint32_t version; + + /** + * 1 for any implicit event, such as an implicit wait at a synchronous data construct + * 0 otherwise + */ + uint32_t implicit; + + /** + * Device type + */ + uint32_t deviceType; + + /** + * Device number + */ + uint32_t deviceNumber; + + /** + * ThreadId + */ + uint32_t threadId; + + /** + * Value of async() clause of the corresponding directive + */ + uint64_t async; + + /** + * Internal asynchronous queue number used + */ + uint64_t asyncMap; + + /** + * The line number of the directive or program construct or the starting line + * number of the OpenACC construct corresponding to the event. + * A zero value means the line number is not known. + */ + uint32_t lineNo; + + /** + * For an OpenACC construct, this contains the line number of the end + * of the construct. A zero value means the line number is not known. + */ + uint32_t endLineNo; + + /** + * The line number of the first line of the function named in funcName. + * A zero value means the line number is not known. + */ + uint32_t funcLineNo; + + /** + * The last line number of the function named in funcName. + * A zero value means the line number is not known. 
+ */ + uint32_t funcEndLineNo; + + /** + * CUPTI start timestamp + */ + uint64_t start; + + /** + * CUPTI end timestamp + */ + uint64_t end; + + /** + * CUDA device id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuDeviceId; + + /** + * CUDA context id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuContextId; + + /** + * CUDA stream id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuStreamId; + + /** + * The ID of the process where the OpenACC activity is executing. + */ + uint32_t cuProcessId; + + /** + * The ID of the thread where the OpenACC activity is executing. + */ + uint32_t cuThreadId; + + /** + * The OpenACC correlation ID. + * Valid only if deviceType is acc_device_nvidia. + * If not 0, it uniquely identifies this record. It is identical to the + * externalId in the preceeding external correlation record of type + * CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC. + */ + uint32_t externalId; + + /* + * A pointer to null-terminated string containing the name of or path to + * the source file, if known, or a null pointer if not. + */ + const char *srcFile; + + /* + * A pointer to a null-terminated string containing the name of the + * function in which the event occurred. + */ + const char *funcName; +} CUpti_ActivityOpenAcc; + +/** + * \brief The activity record for OpenACC data. + * + * (CUPTI_ACTIVITY_KIND_OPENACC_DATA). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_OPENACC_DATA. + */ + CUpti_ActivityKind kind; + + /** + * CUPTI OpenACC event kind (\see CUpti_OpenAccEventKind) + */ + CUpti_OpenAccEventKind eventKind; + + /* + * CUPTI OpenACC parent construct kind (\see CUpti_OpenAccConstructKind) + * + * Note that for applications using PGI OpenACC runtime < 16.1, this + * will always be CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN. + */ + CUpti_OpenAccConstructKind parentConstruct; + + /* + * Version number + */ + uint32_t version; + + /* + * 1 for any implicit event, such as an implicit wait at a synchronous data construct + * 0 otherwise + */ + uint32_t implicit; + + /* + * Device type + */ + uint32_t deviceType; + + /* + * Device number + */ + uint32_t deviceNumber; + + /** + * ThreadId + */ + uint32_t threadId; + + /* + * Value of async() clause of the corresponding directive + */ + uint64_t async; + + /* + * Internal asynchronous queue number used + */ + uint64_t asyncMap; + + /* + * The line number of the directive or program construct or the starting line + * number of the OpenACC construct corresponding to the event. + * A negative or zero value means the line number is not known. + */ + uint32_t lineNo; + + /* + * For an OpenACC construct, this contains the line number of the end + * of the construct. A negative or zero value means the line number is not known. + */ + uint32_t endLineNo; + + /* + * The line number of the first line of the function named in func_name. + * A negative or zero value means the line number is not known. + */ + uint32_t funcLineNo; + + /* + * The last line number of the function named in func_name. + * A negative or zero value means the line number is not known. + */ + uint32_t funcEndLineNo; + + /** + * CUPTI start timestamp + */ + uint64_t start; + + /** + * CUPTI end timestamp + */ + uint64_t end; + + /** + * CUDA device id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuDeviceId; + + /** + * CUDA context id + * Valid only if deviceType is acc_device_nvidia. 
+ */ + uint32_t cuContextId; + + /** + * CUDA stream id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuStreamId; + + /** + * The ID of the process where the OpenACC activity is executing. + */ + uint32_t cuProcessId; + + /** + * The ID of the thread where the OpenACC activity is executing. + */ + uint32_t cuThreadId; + + /** + * The OpenACC correlation ID. + * Valid only if deviceType is acc_device_nvidia. + * If not 0, it uniquely identifies this record. It is identical to the + * externalId in the preceeding external correlation record of type + * CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC. + */ + uint32_t externalId; + + /* + * A pointer to null-terminated string containing the name of or path to + * the source file, if known, or a null pointer if not. + */ + const char *srcFile; + + /* + * A pointer to a null-terminated string containing the name of the + * function in which the event occurred. + */ + const char *funcName; + + /* --- end of common CUpti_ActivityOpenAcc part --- */ + + /** + * Number of bytes + */ + uint64_t bytes; + + /** + * Host pointer if available + */ + uint64_t hostPtr; + + /** + * Device pointer if available + */ + uint64_t devicePtr; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad1; +#endif + + /* + * A pointer to null-terminated string containing the name of the variable + * for which this event is triggered, if known, or a null pointer if not. + */ + const char *varName; + +} CUpti_ActivityOpenAccData; + +/** + * \brief The activity record for OpenACC launch. + * + * (CUPTI_ACTIVITY_KIND_OPENACC_LAUNCH). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_OPENACC_LAUNCH. + */ + CUpti_ActivityKind kind; + + /** + * CUPTI OpenACC event kind (\see CUpti_OpenAccEventKind) + */ + CUpti_OpenAccEventKind eventKind; + + /** + * CUPTI OpenACC parent construct kind (\see CUpti_OpenAccConstructKind) + * + * Note that for applications using PGI OpenACC runtime < 16.1, this + * will always be CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN. + */ + CUpti_OpenAccConstructKind parentConstruct; + + /** + * Version number + */ + uint32_t version; + + /** + * 1 for any implicit event, such as an implicit wait at a synchronous data construct + * 0 otherwise + */ + uint32_t implicit; + + /** + * Device type + */ + uint32_t deviceType; + + /** + * Device number + */ + uint32_t deviceNumber; + + /** + * ThreadId + */ + uint32_t threadId; + + /** + * Value of async() clause of the corresponding directive + */ + uint64_t async; + + /** + * Internal asynchronous queue number used + */ + uint64_t asyncMap; + + /** + * The line number of the directive or program construct or the starting line + * number of the OpenACC construct corresponding to the event. + * A negative or zero value means the line number is not known. + */ + uint32_t lineNo; + + /** + * For an OpenACC construct, this contains the line number of the end + * of the construct. A negative or zero value means the line number is not known. + */ + uint32_t endLineNo; + + /** + * The line number of the first line of the function named in func_name. + * A negative or zero value means the line number is not known. + */ + uint32_t funcLineNo; + + /** + * The last line number of the function named in func_name. + * A negative or zero value means the line number is not known. 
+ */ + uint32_t funcEndLineNo; + + /** + * CUPTI start timestamp + */ + uint64_t start; + + /** + * CUPTI end timestamp + */ + uint64_t end; + + /** + * CUDA device id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuDeviceId; + + /** + * CUDA context id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuContextId; + + /** + * CUDA stream id + * Valid only if deviceType is acc_device_nvidia. + */ + uint32_t cuStreamId; + + /** + * The ID of the process where the OpenACC activity is executing. + */ + uint32_t cuProcessId; + + /** + * The ID of the thread where the OpenACC activity is executing. + */ + uint32_t cuThreadId; + + /** + * The OpenACC correlation ID. + * Valid only if deviceType is acc_device_nvidia. + * If not 0, it uniquely identifies this record. It is identical to the + * externalId in the preceeding external correlation record of type + * CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC. + */ + uint32_t externalId; + + /** + * A pointer to null-terminated string containing the name of or path to + * the source file, if known, or a null pointer if not. + */ + const char *srcFile; + + /** + * A pointer to a null-terminated string containing the name of the + * function in which the event occurred. + */ + const char *funcName; + + /* --- end of common CUpti_ActivityOpenAcc part --- */ + + /** + * The number of gangs created for this kernel launch + */ + uint64_t numGangs; + + /** + * The number of workers created for this kernel launch + */ + uint64_t numWorkers; + + /** + * The number of vector lanes created for this kernel launch + */ + uint64_t vectorLength; + +#ifndef CUPTILP64 + /** + * Undefined. Reserved for internal use. + */ + uint32_t pad1; +#endif + + /** + * A pointer to null-terminated string containing the name of the + * kernel being launched, if known, or a null pointer if not. + */ + const char *kernelName; + +} CUpti_ActivityOpenAccLaunch; + +/** + * \brief The activity record for OpenACC other. + * + * (CUPTI_ACTIVITY_KIND_OPENACC_OTHER). + */ +typedef struct PACKED_ALIGNMENT { + /** + * The activity record kind, must be CUPTI_ACTIVITY_KIND_OPENACC_OTHER. + */ + CUpti_ActivityKind kind; + + /** + * CUPTI OpenACC event kind (\see CUpti_OpenAccEventKind) + */ + CUpti_OpenAccEventKind eventKind; + + /** + * CUPTI OpenACC parent construct kind (\see CUpti_OpenAccConstructKind) + * + * Note that for applications using PGI OpenACC runtime < 16.1, this + * will always be CUPTI_OPENACC_CONSTRUCT_KIND_UNKNOWN. + */ + CUpti_OpenAccConstructKind parentConstruct; + + /** + * Version number + */ + uint32_t version; + + /** + * 1 for any implicit event, such as an implicit wait at a synchronous data construct + * 0 otherwise + */ + uint32_t implicit; + + /** + * Device type + */ + uint32_t deviceType; + + /** + * Device number + */ + uint32_t deviceNumber; + + /** + * ThreadId + */ + uint32_t threadId; + + /** + * Value of async() clause of the corresponding directive + */ + uint64_t async; + + /** + * Internal asynchronous queue number used + */ + uint64_t asyncMap; + + /** + * The line number of the directive or program construct or the starting line + * number of the OpenACC construct corresponding to the event. + * A negative or zero value means the line number is not known. + */ + uint32_t lineNo; + + /** + * For an OpenACC construct, this contains the line number of the end + * of the construct. A negative or zero value means the line number is not known. 
+   */
+  uint32_t endLineNo;
+
+  /**
+   * The line number of the first line of the function named in func_name.
+   * A negative or zero value means the line number is not known.
+   */
+  uint32_t funcLineNo;
+
+  /**
+   * The last line number of the function named in func_name.
+   * A negative or zero value means the line number is not known.
+   */
+  uint32_t funcEndLineNo;
+
+  /**
+   * CUPTI start timestamp
+   */
+  uint64_t start;
+
+  /**
+   * CUPTI end timestamp
+   */
+  uint64_t end;
+
+  /**
+   * CUDA device id
+   * Valid only if deviceType is acc_device_nvidia.
+   */
+  uint32_t cuDeviceId;
+
+  /**
+   * CUDA context id
+   * Valid only if deviceType is acc_device_nvidia.
+   */
+  uint32_t cuContextId;
+
+  /**
+   * CUDA stream id
+   * Valid only if deviceType is acc_device_nvidia.
+   */
+  uint32_t cuStreamId;
+
+  /**
+   * The ID of the process where the OpenACC activity is executing.
+   */
+  uint32_t cuProcessId;
+
+  /**
+   * The ID of the thread where the OpenACC activity is executing.
+   */
+  uint32_t cuThreadId;
+
+  /**
+   * The OpenACC correlation ID.
+   * Valid only if deviceType is acc_device_nvidia.
+   * If not 0, it uniquely identifies this record. It is identical to the
+   * externalId in the preceding external correlation record of type
+   * CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC.
+   */
+  uint32_t externalId;
+
+  /**
+   * A pointer to a null-terminated string containing the name of or path to
+   * the source file, if known, or a null pointer if not.
+   */
+  const char *srcFile;
+
+  /**
+   * A pointer to a null-terminated string containing the name of the
+   * function in which the event occurred.
+   */
+  const char *funcName;
+
+  /* --- end of common CUpti_ActivityOpenAcc part --- */
+} CUpti_ActivityOpenAccOther;
+
+
+/**
+ * \brief The base activity record for OpenMP records.
+ *
+ * \see CUpti_ActivityKind
+ */
+typedef struct PACKED_ALIGNMENT {
+
+  /**
+   * The kind of this activity.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * CUPTI OpenMP event kind (\see CUpti_OpenMpEventKind)
+   */
+  CUpti_OpenMpEventKind eventKind;
+
+  /**
+   * Version number
+   */
+  uint32_t version;
+
+  /**
+   * ThreadId
+   */
+  uint32_t threadId;
+
+  /**
+   * CUPTI start timestamp
+   */
+  uint64_t start;
+
+  /**
+   * CUPTI end timestamp
+   */
+  uint64_t end;
+
+  /**
+   * The ID of the process where the OpenMP activity is executing.
+   */
+  uint32_t cuProcessId;
+
+  /**
+   * The ID of the thread where the OpenMP activity is executing.
+   */
+  uint32_t cuThreadId;
+} CUpti_ActivityOpenMp;
+
+/**
+ * \brief The kind of external APIs supported for correlation.
+ *
+ * Custom correlation kinds are reserved for usage in external tools.
+ *
+ * \see CUpti_ActivityExternalCorrelation
+ */
+typedef enum {
+  CUPTI_EXTERNAL_CORRELATION_KIND_INVALID = 0,
+
+  /**
+   * The external API is unknown to CUPTI
+   */
+  CUPTI_EXTERNAL_CORRELATION_KIND_UNKNOWN = 1,
+
+  /**
+   * The external API is OpenACC
+   */
+  CUPTI_EXTERNAL_CORRELATION_KIND_OPENACC = 2,
+
+  /**
+   * The external API is custom0
+   */
+  CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM0 = 3,
+
+  /**
+   * The external API is custom1
+   */
+  CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM1 = 4,
+
+  /**
+   * The external API is custom2
+   */
+  CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM2 = 5,
+
+  /**
+   * Add new kinds before this line
+   */
+  CUPTI_EXTERNAL_CORRELATION_KIND_SIZE,
+
+  CUPTI_EXTERNAL_CORRELATION_KIND_FORCE_INT = 0x7fffffff
+} CUpti_ExternalCorrelationKind;
+
+/**
+ * \brief The activity record for correlation with external records.
+ *
+ * This activity record correlates native CUDA records (e.g. CUDA Driver API,
+ * kernels, memcpys, ...) with records from external APIs such as OpenACC.
+ * (CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION).
+ *
+ * \see CUpti_ActivityKind
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The kind of this activity.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The kind of external API this record is correlated to.
+   */
+  CUpti_ExternalCorrelationKind externalKind;
+
+  /**
+   * The correlation ID of the associated non-CUDA API record.
+   * The exact field in the associated external record depends
+   * on that record's activity kind (\see externalKind).
+   */
+  uint64_t externalId;
+
+  /**
+   * The correlation ID of the associated CUDA driver or runtime API record.
+   */
+  uint32_t correlationId;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint32_t reserved;
+} CUpti_ActivityExternalCorrelation;
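+
+/*
+ * Editor's note -- usage sketch, not part of the original header: external
+ * correlation IDs are typically pushed and popped around a region of CUDA
+ * work via cuptiActivityPushExternalCorrelationId() and
+ * cuptiActivityPopExternalCorrelationId(), declared elsewhere in this header.
+ * CUPTI then emits a CUpti_ActivityExternalCorrelation record tying the
+ * pushed ID to the correlation IDs of the enclosed driver/runtime API calls.
+ * The ID value (42) is arbitrary and purely illustrative.
+ *
+ * \code
+ * uint64_t lastId = 0;
+ * cuptiActivityPushExternalCorrelationId(CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM0, 42);
+ * // ... issue the CUDA runtime/driver calls to be correlated ...
+ * cuptiActivityPopExternalCorrelationId(CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM0, &lastId);
+ * \endcode
+ */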
+
+/**
+ * \brief The device type for device connected to NVLink.
+ */
+typedef enum {
+  CUPTI_DEV_TYPE_INVALID = 0,
+
+  /**
+   * The device type is GPU.
+   */
+  CUPTI_DEV_TYPE_GPU = 1,
+
+  /**
+   * The device type is NVLink processing unit in CPU.
+   */
+  CUPTI_DEV_TYPE_NPU = 2,
+
+  CUPTI_DEV_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_DevType;
+
+/**
+ * \brief NVLink information. (deprecated in CUDA 9.0)
+ *
+ * This structure gives capabilities of each logical NVLink connection between two devices,
+ * gpu<->gpu or gpu<->CPU, which can be used to understand the topology.
+ * NVLink information is now reported using the
+ * CUpti_ActivityNvLink2 activity record.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_NVLINK.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * NVLink version.
+   */
+  uint32_t nvlinkVersion;
+
+  /**
+   * Type of device 0 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev0;
+
+  /**
+   * Type of device 1 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev1;
+
+  /**
+   * If typeDev0 is CUPTI_DEV_TYPE_GPU, UUID for device 0. \ref CUpti_ActivityDevice4.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev0;
+
+  /**
+   * If typeDev1 is CUPTI_DEV_TYPE_GPU, UUID for device 1. \ref CUpti_ActivityDevice4.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev1;
+
+  /**
+   * Flag gives capabilities of the link \see CUpti_LinkFlag
+   */
+  uint32_t flag;
+
+  /**
+   * Number of physical NVLinks present between two devices.
+   */
+  uint32_t physicalNvLinkCount;
+
+  /**
+   * Port numbers for maximum 4 NVLinks connected to device 0.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev0[4];
+
+  /**
+   * Port numbers for maximum 4 NVLinks connected to device 1.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev1[4];
+
+  /**
+   * Bandwidth of NVLink in kbytes/sec
+   */
+  uint64_t bandwidth;
+} CUpti_ActivityNvLink;
+
+/**
+ * \brief NVLink information. (deprecated in CUDA 10.0)
+ *
+ * This structure gives capabilities of each logical NVLink connection between two devices,
+ * gpu<->gpu or gpu<->CPU, which can be used to understand the topology.
+ * NVLink information is now reported using the
+ * CUpti_ActivityNvLink4 activity record.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_NVLINK.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * NVLink version.
+   */
+  uint32_t nvlinkVersion;
+
+  /**
+   * Type of device 0 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev0;
+
+  /**
+   * Type of device 1 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev1;
+
+  /**
+   * If typeDev0 is CUPTI_DEV_TYPE_GPU, UUID for device 0. \ref CUpti_ActivityDevice4.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev0;
+
+  /**
+   * If typeDev1 is CUPTI_DEV_TYPE_GPU, UUID for device 1. \ref CUpti_ActivityDevice4.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev1;
+
+  /**
+   * Flag gives capabilities of the link \see CUpti_LinkFlag
+   */
+  uint32_t flag;
+
+  /**
+   * Number of physical NVLinks present between two devices.
+   */
+  uint32_t physicalNvLinkCount;
+
+  /**
+   * Port numbers for maximum 16 NVLinks connected to device 0.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev0[CUPTI_MAX_NVLINK_PORTS];
+
+  /**
+   * Port numbers for maximum 16 NVLinks connected to device 1.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev1[CUPTI_MAX_NVLINK_PORTS];
+
+  /**
+   * Bandwidth of NVLink in kbytes/sec
+   */
+  uint64_t bandwidth;
+} CUpti_ActivityNvLink2;
+
+/**
+ * \brief NVLink information.
+ *
+ * This structure gives capabilities of each logical NVLink connection between two devices,
+ * gpu<->gpu or gpu<->CPU, which can be used to understand the topology.
+ * NVLink information is now reported using the
+ * CUpti_ActivityNvLink4 activity record.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_NVLINK.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * NVLink version.
+   */
+  uint32_t nvlinkVersion;
+
+  /**
+   * Type of device 0 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev0;
+
+  /**
+   * Type of device 1 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev1;
+
+  /**
+   * If typeDev0 is CUPTI_DEV_TYPE_GPU, UUID for device 0. \ref CUpti_ActivityDevice4.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev0;
+
+  /**
+   * If typeDev1 is CUPTI_DEV_TYPE_GPU, UUID for device 1. \ref CUpti_ActivityDevice4.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev1;
+
+  /**
+   * Flag gives capabilities of the link \see CUpti_LinkFlag
+   */
+  uint32_t flag;
+
+  /**
+   * Number of physical NVLinks present between two devices.
+   */
+  uint32_t physicalNvLinkCount;
+
+  /**
+   * Port numbers for maximum 16 NVLinks connected to device 0.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev0[CUPTI_MAX_NVLINK_PORTS];
+
+  /**
+   * Port numbers for maximum 16 NVLinks connected to device 1.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev1[CUPTI_MAX_NVLINK_PORTS];
+
+  /**
+   * Bandwidth of NVLink in kbytes/sec
+   */
+  uint64_t bandwidth;
+
+  /**
+   * NVSwitch is connected as an intermediate node.
+   */
+  uint8_t nvswitchConnected;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint8_t pad[7];
+} CUpti_ActivityNvLink3;
+
+/**
+ * \brief NVLink information.
+ *
+ * This structure gives capabilities of each logical NVLink connection between two devices,
+ * gpu<->gpu or gpu<->CPU, which can be used to understand the topology.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_NVLINK.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * NVLink version.
+   */
+  uint32_t nvlinkVersion;
+
+  /**
+   * Type of device 0 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev0;
+
+  /**
+   * Type of device 1 \ref CUpti_DevType
+   */
+  CUpti_DevType typeDev1;
+
+  /**
+   * If typeDev0 is CUPTI_DEV_TYPE_GPU, UUID for device 0. \ref CUpti_ActivityDevice4.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev0;
+
+  /**
+   * If typeDev1 is CUPTI_DEV_TYPE_GPU, UUID for device 1. \ref CUpti_ActivityDevice4.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, struct npu for NPU.
+   */
+  union {
+    CUuuid uuidDev;
+    struct {
+      /**
+       * Index of the NPU. First index will always be zero.
+       */
+      uint32_t index;
+
+      /**
+       * Domain ID of NPU. On Linux, this can be queried using lspci.
+       */
+      uint32_t domainId;
+    } npu;
+  } idDev1;
+
+  /**
+   * Flag gives capabilities of the link \see CUpti_LinkFlag
+   */
+  uint32_t flag;
+
+  /**
+   * Number of physical NVLinks present between two devices.
+   */
+  uint32_t physicalNvLinkCount;
+
+  /**
+   * Port numbers for maximum 32 NVLinks connected to device 0.
+   * If typeDev0 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev0[CUPTI_MAX_NVLINK_PORTS];
+
+  /**
+   * Port numbers for maximum 32 NVLinks connected to device 1.
+   * If typeDev1 is CUPTI_DEV_TYPE_NPU, ignore this field.
+   * In case of invalid/unknown port number, this field will be set
+   * to value CUPTI_NVLINK_INVALID_PORT.
+   * This will be used to correlate the metric values to individual
+   * physical link and attribute traffic to the logical NVLink in
+   * the topology.
+   */
+  int8_t portDev1[CUPTI_MAX_NVLINK_PORTS];
+
+  /**
+   * Bandwidth of NVLink in kbytes/sec
+   */
+  uint64_t bandwidth;
+
+  /**
+   * NVSwitch is connected as an intermediate node.
+   */
+  uint8_t nvswitchConnected;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint8_t pad[7];
+} CUpti_ActivityNvLink4;
+
+#define CUPTI_MAX_GPUS 32
+/**
+ * Field to differentiate whether a PCIE activity record
+ * is for a GPU or a PCI bridge.
+ */
+typedef enum {
+  /**
+   * PCIE GPU record
+   */
+  CUPTI_PCIE_DEVICE_TYPE_GPU = 0,
+
+  /**
+   * PCIE Bridge record
+   */
+  CUPTI_PCIE_DEVICE_TYPE_BRIDGE = 1,
+
+  CUPTI_PCIE_DEVICE_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_PcieDeviceType;
+
+/**
+ * \brief PCI devices information required to construct topology
+ *
+ * This structure gives capabilities of a GPU or PCI bridge connected to the PCIE bus,
+ * which can be used to understand the topology.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_PCIE.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * Type of device in topology, \ref CUpti_PcieDeviceType. If type is
+   * CUPTI_PCIE_DEVICE_TYPE_GPU, use devId for id and gpuAttr, and if type is
+   * CUPTI_PCIE_DEVICE_TYPE_BRIDGE, use bridgeId for id and bridgeAttr.
+   */
+  CUpti_PcieDeviceType type;
+
+  /**
+   * A unique identifier for GPU or Bridge in Topology
+   */
+  union {
+    /**
+     * GPU device ID
+     */
+    CUdevice devId;
+
+    /**
+     * A unique identifier for Bridge in the Topology
+     */
+    uint32_t bridgeId;
+  } id;
+
+  /**
+   * Domain for the GPU or Bridge, required to identify which PCIE bus it belongs to in
+   * multiple NUMA systems.
+   */
+  uint32_t domain;
+
+  /**
+   * PCIE Generation of GPU or Bridge.
+   */
+  uint16_t pcieGeneration;
+
+  /**
+   * Link rate of the GPU or bridge in gigatransfers per second (GT/s)
+   */
+  uint16_t linkRate;
+
+  /**
+   * Link width of the GPU or bridge
+   */
+  uint16_t linkWidth;
+
+  /**
+   * Upstream bus ID for the GPU or PCI bridge. Required to identify which bus it is
+   * connected to in the topology.
+   */
+  uint16_t upstreamBus;
+
+  /**
+   * Attributes for more information about GPU (gpuAttr) or PCI Bridge (bridgeAttr)
+   */
+  union {
+    struct {
+      /**
+       * UUID for the device. \ref CUpti_ActivityDevice4.
+       */
+      CUuuid uuidDev;
+
+      /**
+       * CUdevice with which this device has P2P capability.
+       * This can also be obtained by querying the cuDeviceCanAccessPeer or
+       * cudaDeviceCanAccessPeer APIs.
+       */
+      CUdevice peerDev[CUPTI_MAX_GPUS];
+    } gpuAttr;
+
+    struct {
+      /**
+       * The downstream bus number, used to search downstream devices/bridges connected
+       * to this bridge.
+       */
+      uint16_t secondaryBus;
+
+      /**
+       * Device ID of the bridge
+       */
+      uint16_t deviceId;
+
+      /**
+       * Vendor ID of the bridge
+       */
+      uint16_t vendorId;
+
+      /**
+       * Padding for alignment
+       */
+      uint16_t pad0;
+    } bridgeAttr;
+  } attr;
+} CUpti_ActivityPcie;
+
+/**
+ * \brief PCIE Generation.
+ *
+ * Enumeration of PCIE Generation for
+ * pcie activity attribute pcieGeneration
+ */
+typedef enum {
+  /**
+   * PCIE Generation 1
+   */
+  CUPTI_PCIE_GEN_GEN1 = 1,
+
+  /**
+   * PCIE Generation 2
+   */
+  CUPTI_PCIE_GEN_GEN2 = 2,
+
+  /**
+   * PCIE Generation 3
+   */
+  CUPTI_PCIE_GEN_GEN3 = 3,
+
+  /**
+   * PCIE Generation 4
+   */
+  CUPTI_PCIE_GEN_GEN4 = 4,
+
+  /**
+   * PCIE Generation 5
+   */
+  CUPTI_PCIE_GEN_GEN5 = 5,
+
+  CUPTI_PCIE_GEN_FORCE_INT = 0x7fffffff
+} CUpti_PcieGen;
+
+/**
+ * \brief The activity record for an instantaneous CUPTI event.
+ *
+ * This activity record represents a CUPTI event value
+ * (CUPTI_ACTIVITY_KIND_EVENT) sampled at a particular instant.
+ * This activity record kind is not produced by the activity API but is
+ * included for completeness and ease-of-use. Profiler frameworks built on
+ * top of CUPTI that collect event data at a particular time may choose to
+ * use this type to store the collected event data.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The event ID.
+   */
+  CUpti_EventID id;
+
+  /**
+   * The event value.
+   */
+  uint64_t value;
+
+  /**
+   * The timestamp at which the event is sampled
+   */
+  uint64_t timestamp;
+
+  /**
+   * The device id
+   */
+  uint32_t deviceId;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint32_t reserved;
+} CUpti_ActivityInstantaneousEvent;
+
+/**
+ * \brief The activity record for an instantaneous CUPTI event
+ * with event domain instance information.
+ *
+ * This activity record represents a CUPTI event value for a
+ * specific event domain instance
+ * (CUPTI_ACTIVITY_KIND_EVENT_INSTANCE) sampled at a particular instant.
+ * This activity record kind is not produced by the activity API but is
+ * included for completeness and ease-of-use. Profiler frameworks built on
+ * top of CUPTI that collect event data may choose to use this type to store the
+ * collected event data. This activity record should be used when
+ * event domain instance information needs to be associated with the
+ * event.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_INSTANTANEOUS_EVENT_INSTANCE.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The event ID.
+   */
+  CUpti_EventID id;
+
+  /**
+   * The event value.
+   */
+  uint64_t value;
+
+  /**
+   * The timestamp at which the event is sampled
+   */
+  uint64_t timestamp;
+
+  /**
+   * The device id
+   */
+  uint32_t deviceId;
+
+  /**
+   * The event domain instance
+   */
+  uint8_t instance;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint8_t pad[3];
+} CUpti_ActivityInstantaneousEventInstance;
+
+/**
+ * \brief The activity record for an instantaneous CUPTI metric.
+ *
+ * This activity record represents the collection of a CUPTI metric
+ * value (CUPTI_ACTIVITY_KIND_METRIC) at a particular instant.
+ * This activity record kind is not produced by the activity API but
+ * is included for completeness and ease-of-use. Profiler frameworks built
+ * on top of CUPTI that collect metric data may choose to use this type to
+ * store the collected metric data.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The metric ID.
+   */
+  CUpti_MetricID id;
+
+  /**
+   * The metric value.
+   */
+  CUpti_MetricValue value;
+
+  /**
+   * The timestamp at which the metric is sampled
+   */
+  uint64_t timestamp;
+
+  /**
+   * The device id
+   */
+  uint32_t deviceId;
+
+  /**
+   * The properties of this metric. \see CUpti_ActivityFlag
+   */
+  uint8_t flags;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint8_t pad[3];
+} CUpti_ActivityInstantaneousMetric;
+
+/**
+ * \brief The instantaneous activity record for a CUPTI metric with instance
+ * information.
+ *
+ * This activity record represents a CUPTI metric value
+ * for a specific metric domain instance
+ * (CUPTI_ACTIVITY_KIND_METRIC_INSTANCE) sampled at a particular time. This
+ * activity record kind is not produced by the activity API but is included for
+ * completeness and ease-of-use. Profiler frameworks built on top of
+ * CUPTI that collect metric data may choose to use this type to store
+ * the collected metric data. This activity record should be used when
+ * metric domain instance information needs to be associated with the
+ * metric.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_INSTANTANEOUS_METRIC_INSTANCE.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The metric ID.
+   */
+  CUpti_MetricID id;
+
+  /**
+   * The metric value.
+   */
+  CUpti_MetricValue value;
+
+  /**
+   * The timestamp at which the metric is sampled
+   */
+  uint64_t timestamp;
+
+  /**
+   * The device id
+   */
+  uint32_t deviceId;
+
+  /**
+   * The properties of this metric. \see CUpti_ActivityFlag
+   */
+  uint8_t flags;
+
+  /**
+   * The metric domain instance
+   */
+  uint8_t instance;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint8_t pad[2];
+} CUpti_ActivityInstantaneousMetricInstance;
+
+/**
+ * \brief The types of JIT entry.
+ *
+ * To be used in CUpti_ActivityJit.
+ */
+typedef enum {
+  CUPTI_ACTIVITY_JIT_ENTRY_INVALID = 0,
+
+  /**
+   * PTX to CUBIN.
+   */
+  CUPTI_ACTIVITY_JIT_ENTRY_PTX_TO_CUBIN = 1,
+
+  /**
+   * NVVM-IR to PTX
+   */
+  CUPTI_ACTIVITY_JIT_ENTRY_NVVM_IR_TO_PTX = 2,
+
+  CUPTI_ACTIVITY_JIT_ENTRY_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityJitEntryType;
+
+/**
+ * \brief The types of JIT compilation operations.
+ *
+ * To be used in CUpti_ActivityJit.
+ */
+typedef enum {
+  CUPTI_ACTIVITY_JIT_OPERATION_INVALID = 0,
+
+  /**
+   * Loaded from the compute cache.
+   */
+  CUPTI_ACTIVITY_JIT_OPERATION_CACHE_LOAD = 1,
+
+  /**
+   * Stored in the compute cache.
+   */
+  CUPTI_ACTIVITY_JIT_OPERATION_CACHE_STORE = 2,
+
+  /**
+   * JIT compilation.
+   */
+  CUPTI_ACTIVITY_JIT_OPERATION_COMPILE = 3,
+
+  CUPTI_ACTIVITY_JIT_OPERATION_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityJitOperationType;
+
+/**
+ * \brief The activity record for JIT operations.
+ *
+ * This activity represents the JIT operations (compile, load, store) of a CUmodule
+ * from the compute cache.
+ * It gives the exact hashed path of where the cached module is loaded from,
+ * or where the module will be stored after Just-In-Time (JIT) compilation.
+ */
+typedef struct PACKED_ALIGNMENT {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_JIT.
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The JIT entry type.
+   */
+  CUpti_ActivityJitEntryType jitEntryType;
+
+  /**
+   * The JIT operation type.
+   */
+  CUpti_ActivityJitOperationType jitOperationType;
+
+  /**
+   * The device ID.
+   */
+  uint32_t deviceId;
+
+  /**
+   * The start timestamp for the JIT operation, in ns. A value of 0 for
+   * both the start and end timestamps indicates that timestamp
+   * information could not be collected for the JIT operation.
+   */
+  uint64_t start;
+
+  /**
+   * The end timestamp for the JIT operation, in ns. A value of 0 for both
+   * the start and end timestamps indicates that timestamp information
+   * could not be collected for the JIT operation.
+   */
+  uint64_t end;
+
+  /**
+   * The correlation ID of the JIT operation to which
+   * the records belong. Each JIT operation is
+   * assigned a unique correlation ID that is identical to the
+   * correlation ID in the driver or runtime API activity record that
+   * launched the JIT operation.
+   */
+  uint32_t correlationId;
+
+  /**
+   * Undefined. Reserved for internal use.
+   */
+  uint32_t padding;
+
+  /**
+   * The correlation ID to correlate JIT compilation, load and store operations.
+   * Each JIT compilation unit is assigned a unique correlation ID
+   * at the time of the JIT compilation. This correlation ID can be used
+   * to find the matching JIT cache load/store records.
+   */
+  uint64_t jitOperationCorrelationId;
+
+  /**
+   * The size of the compute cache.
+   */
+  uint64_t cacheSize;
+
+  /**
+   * The path where the fat binary is cached.
+   */
+  const char* cachePath;
+} CUpti_ActivityJit;
+
+
+/**
+ * \brief The activity record for trace of graph execution.
+ *
+ * This activity record represents execution for a graph without giving visibility
+ * about the execution of its nodes. This is intended to reduce overheads in tracing
+ * each node. The activity kind is CUPTI_ACTIVITY_KIND_GRAPH_TRACE.
+ */
+typedef struct {
+  /**
+   * The activity record kind, must be CUPTI_ACTIVITY_KIND_GRAPH_TRACE
+   */
+  CUpti_ActivityKind kind;
+
+  /**
+   * The correlation ID of the graph launch. Each graph launch is
+   * assigned a unique correlation ID that is identical to the
+   * correlation ID in the driver API activity record that launched
+   * the graph.
+   */
+  uint32_t correlationId;
+
+  /**
+   * The start timestamp for the graph execution, in ns. A value of 0
+   * for both the start and end timestamps indicates that timestamp
+   * information could not be collected for the graph.
+   */
+  uint64_t start;
+
+  /**
+   * The end timestamp for the graph execution, in ns. A value of 0
+   * for both the start and end timestamps indicates that timestamp
+   * information could not be collected for the graph.
+   */
+  uint64_t end;
+
+  /**
+   * The ID of the device where the graph execution is occurring.
+   */
+  uint32_t deviceId;
+
+  /**
+   * The unique ID of the graph that is launched.
+   */
+  uint32_t graphId;
+
+  /**
+   * The ID of the context where the graph is being launched.
+   */
+  uint32_t contextId;
+
+  /**
+   * The ID of the stream where the graph is being launched.
+   */
+  uint32_t streamId;
+
+  /**
+   * This field is reserved for internal use
+   */
+  void *reserved;
+} CUpti_ActivityGraphTrace;
+
+END_PACKED_ALIGNMENT
+
+/**
+ * \brief Activity attributes.
+ *
+ * These attributes are used to control the behavior of the activity
+ * API.
+ */
+typedef enum {
+  /**
+   * The device memory size (in bytes) reserved for storing profiling data for concurrent
+   * kernels (activity kind \ref CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL), memcopies and memsets
+   * for each buffer on a context. The value is a size_t.
+   *
+   * There is a limit on how many device buffers can be allocated per context. The user
+   * can query and set this limit using the attribute
+   * \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_POOL_LIMIT.
+   * CUPTI doesn't pre-allocate all the buffers; it pre-allocates only as many
+   * buffers as set by the attribute \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_PRE_ALLOCATE_VALUE.
+   * When all of the data in a buffer is consumed, the buffer is added to the reuse pool, and
+   * CUPTI picks a buffer from this pool when a new buffer is needed. Thus the memory
+   * footprint does not scale with the kernel count. Applications with a high density
+   * of kernels, memcopies and memsets might cause CUPTI to allocate more device buffers.
+   * CUPTI allocates another buffer only when it runs out of the buffers in the
+   * reuse pool.
+   *
+   * Since buffer allocation happens in the main application thread, this might result
+   * in stalls in the critical path. CUPTI pre-allocates 3 buffers of the same size to
+   * mitigate this issue. The user can query and set the pre-allocation limit using the
+   * attribute \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_PRE_ALLOCATE_VALUE.
+   *
+   * A larger buffer size leaves less device memory for the application.
+   * A smaller buffer size increases the risk of dropping timestamps for
+   * records if too many kernels, memcopies or memsets are launched at one time.
+   *
+   * This value only applies to new buffer allocations. Set this value before initializing
+   * CUDA or before creating a context to ensure it is considered for the following allocations.
+   *
+   * The default value is 3200000 (~3MB), which can accommodate profiling data
+   * for up to 100,000 kernels, memcopies and memsets combined.
+   *
+   * Note: Starting with the CUDA 12.0 Update 1 release, CUPTI allocates the profiling buffer in
+   * device memory by default, as this might help in improving the performance of the
+   * tracing run. Refer to the description of the attribute
+   * \ref CUPTI_ACTIVITY_ATTR_MEM_ALLOCATION_TYPE_HOST_PINNED for more details.
+   * The size of the memory and the maximum number of pools are still controlled by the attributes
+   * \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE and \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_POOL_LIMIT.
+   *
+   * Note: The actual amount of device memory per buffer reserved by CUPTI might be larger.
+   */
+  CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE = 0,
+
+  /**
+   * The device memory size (in bytes) reserved for storing profiling
+   * data for CDP operations for each buffer on a context. The
+   * value is a size_t.
+   *
+   * A larger buffer size means fewer flush operations but
+   * consumes more device memory. This value only applies to new
+   * allocations.
+   *
+   * Set this value before initializing CUDA or before creating a
+   * context to ensure it is considered for the following allocations.
+   *
+   * The default value is 8388608 (8MB).
+   *
+   * Note: The actual amount of device memory per context reserved by
+   * CUPTI might be larger.
+   */
+  CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP = 1,
+
+  /**
+   * The maximum number of device memory buffers per context. The value is a size_t.
+   *
+   * For an application with a high rate of kernel launches, memcopies and memsets, a bigger pool
+   * limit helps in timestamp collection for all these activities at the expense of a larger memory footprint.
+   * Refer to the description of the attribute \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE
+   * for more details.
+   *
+   * Setting this value will not modify the number of memory buffers
+   * currently stored.
+   *
+   * Set this value before initializing CUDA to ensure the limit is
+   * not exceeded.
+   *
+   * The default value is 250.
+   */
+  CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_POOL_LIMIT = 2,
+
+  /**
+   * The profiling semaphore pool size reserved for storing profiling data for
+   * serialized kernels tracing (activity kind \ref CUPTI_ACTIVITY_KIND_KERNEL)
+   * for each context. The value is a size_t.
+   *
+   * There is a limit on how many semaphore pools can be allocated per context. The user
+   * can query and set this limit using the attribute
+   * \ref CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_LIMIT.
+   * CUPTI doesn't pre-allocate all the semaphore pools; it pre-allocates only as many
+   * semaphore pools as set by the attribute \ref CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_PRE_ALLOCATE_VALUE.
+   * When all of the data in a semaphore pool is consumed, the pool is added to the reuse pool, and
+   * CUPTI picks a semaphore pool from the reuse pool when a new semaphore pool is needed. Thus the memory
+   * footprint does not scale with the kernel count. Applications with a high density
+   * of kernels might cause CUPTI to allocate more semaphore pools.
+   * CUPTI allocates another semaphore pool only when it runs out of the semaphore pools in the
+   * reuse pool.
+   *
+   * Since semaphore pool allocation happens in the main application thread, this might result
+   * in stalls in the critical path. CUPTI pre-allocates 3 semaphore pools of the same size to
+   * mitigate this issue. The user can query and set the pre-allocation limit using the
+   * attribute \ref CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_PRE_ALLOCATE_VALUE.
+   *
+   * A larger semaphore pool size leaves less device memory for the application.
+   * A smaller semaphore pool size increases the risk of dropping timestamps for
+   * kernel records if too many kernels are issued/launched at one time.
+   *
+   * This value only applies to new semaphore pool allocations. Set this value before initializing
+   * CUDA or before creating a context to ensure it is considered for the following allocations.
+   *
+   * The default value is 25000, which can accommodate profiling data for up to 25,000 kernels.
+   */
+  CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE = 3,
+
+  /**
+   * The maximum number of profiling semaphore pools per context. The value is a size_t.
+   *
+   * For an application with a high rate of kernel launches, a bigger
+   * pool limit helps in timestamp collection for all the kernels, at the
+   * expense of a larger device memory footprint.
+   * Refer to the description of the attribute \ref CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE
+   * for more details.
+   *
+   * Set this value before initializing CUDA to ensure the limit is not exceeded.
+   *
+   * The default value is 250.
+   */
+  CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_LIMIT = 4,
+
+  /**
+   * The flag to indicate whether the user should provide an activity buffer of zero value.
+   * The value is a uint8_t.
+   *
+   * If the value of this attribute is non-zero, the user should provide
+   * a zero value buffer in the \ref CUpti_BuffersCallbackRequestFunc.
+   * If the user does not provide a zero value buffer after setting this to non-zero,
+   * the activity buffer may contain some uninitialized values when CUPTI returns it in
+   * \ref CUpti_BuffersCallbackCompleteFunc.
+   *
+   * If the value of this attribute is zero, CUPTI will initialize the user buffer
+   * received in the \ref CUpti_BuffersCallbackRequestFunc to zero before filling it.
+   * If the user sets this to zero, a few stalls may appear in the critical path because CUPTI
+   * will zero out the buffer in the main thread.
+   * Set this value before returning from \ref CUpti_BuffersCallbackRequestFunc to
+   * ensure it is considered for all the subsequent user buffers.
+   *
+   * The default value is 0.
+   */
+  CUPTI_ACTIVITY_ATTR_ZEROED_OUT_ACTIVITY_BUFFER = 5,
+
+  /**
+   * Number of device buffers to pre-allocate for a context during the initialization phase.
+   * The value is a size_t.
+   *
+   * Refer to the description of the attribute \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE
+   * for details.
+   *
+   * This value must be less than the maximum number of device buffers set using
+   * the attribute \ref CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_POOL_LIMIT.
+   *
+   * Set this value before initializing CUDA or before creating a context to ensure it
+   * is considered by CUPTI.
+   *
+   * The default value is set to 3 to ping pong between these buffers (if possible).
+   */
+  CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_PRE_ALLOCATE_VALUE = 6,
+
+  /**
+   * Number of profiling semaphore pools to pre-allocate for a context during the
+   * initialization phase. The value is a size_t.
+   *
+   * Refer to the description of the attribute \ref CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE
+   * for details.
+   *
+   * This value must be less than the maximum number of profiling semaphore pools set
+   * using the attribute \ref CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_LIMIT.
+   *
+   * Set this value before initializing CUDA or before creating a context to ensure it
+   * is considered by CUPTI.
+   *
+   * The default value is set to 3 to ping pong between these pools (if possible).
+   */
+  CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_PRE_ALLOCATE_VALUE = 7,
+
+  /**
+   * Allocate page-locked (pinned) host memory for storing profiling data for concurrent
+   * kernels, memcopies and memsets for each buffer on a context. The value is a uint8_t.
+   *
+   * Starting with the CUDA 11.2 release, CUPTI allocates the profiling buffer in pinned host
+   * memory by default, as this might help in improving the performance of the tracing run.
+   * Allocating excessive amounts of pinned memory may degrade system performance, since it
+   * reduces the amount of memory available to the system for paging. For this reason the user
+   * might want to change the location from pinned host memory to device memory by setting the
+   * value of this attribute to 0.
+   *
+   * The default value is 1.
+   */
+  CUPTI_ACTIVITY_ATTR_MEM_ALLOCATION_TYPE_HOST_PINNED = 8,
+
+  CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_FORCE_INT = 0x7fffffff
+} CUpti_ActivityAttribute;
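+
+/*
+ * Editor's note -- usage sketch, not part of the original header: activity
+ * attributes are read and written through cuptiActivityGetAttribute() and
+ * cuptiActivitySetAttribute(), declared later in this header. The 1 MB
+ * buffer size chosen here is arbitrary and purely illustrative.
+ *
+ * \code
+ * size_t attrValue = 1024 * 1024;
+ * size_t attrValueSize = sizeof(attrValue);
+ * // Shrink the per-buffer device memory reservation; do this before CUDA
+ * // is initialized so the new size applies to the following allocations.
+ * cuptiActivitySetAttribute(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE,
+ *                           &attrValueSize, &attrValue);
+ * // Read it back; attrValueSize returns the number of bytes written.
+ * cuptiActivityGetAttribute(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE,
+ *                           &attrValueSize, &attrValue);
+ * \endcode
+ */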
+
+/**
+ * \brief Thread-Id types.
+ *
+ * CUPTI uses different methods to obtain the thread-id depending on the
+ * support and the underlying platform. This enum documents these methods
+ * for each type. The APIs \ref cuptiSetThreadIdType and \ref cuptiGetThreadIdType
+ * can be used to set and get the thread-id type.
+ */
+typedef enum {
+  /**
+   * Default type
+   * Windows uses the API GetCurrentThreadId()
+   * Linux/Mac/Android/QNX use the POSIX pthread API pthread_self()
+   */
+  CUPTI_ACTIVITY_THREAD_ID_TYPE_DEFAULT = 0,
+
+  /**
+   * This type is based on the system API available on the underlying platform,
+   * and the thread-id obtained is expected to be unique for the process lifetime.
+   * Windows uses the API GetCurrentThreadId()
+   * Linux uses the syscall SYS_gettid
+   * Mac uses the syscall SYS_thread_selfid
+   * Android/QNX use gettid()
+   */
+  CUPTI_ACTIVITY_THREAD_ID_TYPE_SYSTEM = 1,
+
+  CUPTI_ACTIVITY_THREAD_ID_TYPE_FORCE_INT = 0x7fffffff
+} CUpti_ActivityThreadIdType;
+
+/**
+ * \brief Get the CUPTI timestamp.
+ *
+ * Returns a timestamp normalized to correspond with the start and end
+ * timestamps reported in the CUPTI activity records. The timestamp is
+ * reported in nanoseconds.
+ *
+ * \param timestamp Returns the CUPTI timestamp
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p timestamp is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetTimestamp(uint64_t *timestamp);
+
+/**
+ * \brief Get the ID of a context.
+ *
+ * Get the ID of a context.
+ *
+ * \param context The context
+ * \param contextId Returns a process-unique ID for the context
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_CONTEXT The context is NULL or not valid.
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p contextId is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetContextId(CUcontext context, uint32_t *contextId);
+
+/**
+ * \brief Get the ID of a stream.
+ *
+ * Get the ID of a stream. The stream ID is unique within a context
+ * (i.e. all streams within a context will have unique stream
+ * IDs).
+ *
+ * \param context If non-NULL then the stream is checked to ensure
+ * that it belongs to this context. Typically this parameter should be
+ * null.
+ * \param stream The stream
+ * \param streamId Returns a context-unique ID for the stream
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_STREAM if unable to get stream ID, or
+ * if \p context is non-NULL and \p stream does not belong to the
+ * context
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p streamId is NULL
+ *
+ * **DEPRECATED** This method is deprecated as of CUDA 8.0.
+ * Use cuptiGetStreamIdEx instead.
+ */
+CUptiResult CUPTIAPI cuptiGetStreamId(CUcontext context, CUstream stream, uint32_t *streamId);
+
+/**
+ * \brief Get the ID of a stream.
+ *
+ * Get the ID of a stream. The stream ID is unique within a context
+ * (i.e. all streams within a context will have unique stream
+ * IDs).
+ *
+ * \param context If non-NULL then the stream is checked to ensure
+ * that it belongs to this context. Typically this parameter should be
+ * null.
+ * \param stream The stream
+ * \param perThreadStream Flag to indicate if the program is compiled for per-thread streams
+ * \param streamId Returns a context-unique ID for the stream
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_STREAM if unable to get stream ID, or
+ * if \p context is non-NULL and \p stream does not belong to the
+ * context
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p streamId is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetStreamIdEx(CUcontext context, CUstream stream, uint8_t perThreadStream, uint32_t *streamId);
+
+/**
+ * \brief Get the ID of a device
+ *
+ * If \p context is NULL, returns the ID of the device that contains
+ * the currently active context.
+ * If \p context is non-NULL, returns
+ * the ID of the device which contains that context. Operates in a
+ * similar manner to cudaGetDevice() or cuCtxGetDevice() but may be
+ * called from within callback functions.
+ *
+ * \param context The context, or NULL to indicate the current context.
+ * \param deviceId Returns the ID of the device that is current for
+ * the calling thread.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_DEVICE if unable to get device ID
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p deviceId is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetDeviceId(CUcontext context, uint32_t *deviceId);
+
+/**
+ * \brief Get the unique ID of a graph node
+ *
+ * Returns the unique ID of the CUDA graph node.
+ *
+ * \param node The graph node.
+ * \param nodeId Returns the unique ID of the node
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p node is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetGraphNodeId(CUgraphNode node, uint64_t *nodeId);
+
+/**
+ * \brief Get the unique ID of a graph
+ *
+ * Returns the unique ID of the CUDA graph.
+ *
+ * \param graph The graph.
+ * \param pId Returns the unique ID of the graph
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p graph is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetGraphId(CUgraph graph, uint32_t *pId);
+
+/**
+ * \brief Enable collection of a specific kind of activity record.
+ *
+ * Enable collection of a specific kind of activity record. Multiple
+ * kinds can be enabled by calling this function multiple times. By
+ * default all activity kinds are disabled for collection.
+ *
+ * \param kind The kind of activity record to collect
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if the activity kind cannot be enabled
+ * \retval CUPTI_ERROR_INVALID_KIND if the activity kind is not supported
+ */
+CUptiResult CUPTIAPI cuptiActivityEnable(CUpti_ActivityKind kind);
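+
+/*
+ * Editor's note -- usage sketch, not part of the original header: a typical
+ * tracing session enables the record kinds of interest up front, before the
+ * CUDA work to be traced is issued. The kinds chosen below are illustrative.
+ *
+ * \code
+ * cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL);
+ * cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY);
+ * cuptiActivityEnable(CUPTI_ACTIVITY_KIND_RUNTIME);
+ * \endcode
+ */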
+
+/**
+ * \brief Enable collection of a specific kind of activity record. For certain activity kinds
+ * it dumps existing records.
+ *
+ * In general, the behavior of this API is similar to the API \ref cuptiActivityEnable, i.e. it
+ * enables the collection of a specific kind of activity record.
+ * Additionally, this API can help in dumping the records for activities which happened in
+ * the past, before enabling the corresponding activity kind.
+ * The API allows the user to get records for the current resource allocations done in CUDA:
+ * For CUPTI_ACTIVITY_KIND_DEVICE, existing device records are dumped.
+ * For CUPTI_ACTIVITY_KIND_CONTEXT, existing context records are dumped.
+ * For CUPTI_ACTIVITY_KIND_STREAM, existing stream records are dumped.
+ * For CUPTI_ACTIVITY_KIND_NVLINK, existing NVLINK records are dumped.
+ * For CUPTI_ACTIVITY_KIND_PCIE, existing PCIE records are dumped.
+ * For other activities, the behavior is similar to the API \ref cuptiActivityEnable.
+ *
+ * Device records are emitted in CUPTI on CUDA driver initialization. Those records
+ * can only be retrieved by the user if CUPTI is attached before CUDA initialization.
+ * Context and stream records are emitted on context and stream creation.
+ * The use case of the API is to provide the records for CUDA resources
+ * (contexts/streams/devices) that are currently active if the user attaches CUPTI late.
+ *
+ * Before calling this function, the user must register buffer callbacks
+ * to get the activity records by calling \ref cuptiActivityRegisterCallbacks.
+ * If the user does not register the buffers and calls the API \ref cuptiActivityEnableAndDump,
+ * then CUPTI will enable the activity kind but not provide any records for that
+ * activity kind.
+ *
+ * \param kind The kind of activity record to collect
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_UNKNOWN if the buffer is not initialized.
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if the activity kind cannot be enabled
+ * \retval CUPTI_ERROR_INVALID_KIND if the activity kind is not supported
+ */
+CUptiResult CUPTIAPI cuptiActivityEnableAndDump(CUpti_ActivityKind kind);
+
+/**
+ * \brief Disable collection of a specific kind of activity record.
+ *
+ * Disable collection of a specific kind of activity record. Multiple
+ * kinds can be disabled by calling this function multiple times. By
+ * default all activity kinds are disabled for collection.
+ *
+ * \param kind The kind of activity record to stop collecting
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_KIND if the activity kind is not supported
+ */
+CUptiResult CUPTIAPI cuptiActivityDisable(CUpti_ActivityKind kind);
+
+/**
+ * \brief Enable collection of a specific kind of activity record for
+ * a context.
+ *
+ * Enable collection of a specific kind of activity record for a
+ * context. The setting done by this API supersedes the global
+ * settings for activity records enabled by \ref cuptiActivityEnable.
+ * Multiple kinds can be enabled by calling this function multiple
+ * times.
+ *
+ * \param context The context for which activity is to be enabled
+ * \param kind The kind of activity record to collect
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if the activity kind cannot be enabled
+ * \retval CUPTI_ERROR_INVALID_KIND if the activity kind is not supported
+ */
+CUptiResult CUPTIAPI cuptiActivityEnableContext(CUcontext context, CUpti_ActivityKind kind);
+
+/**
+ * \brief Disable collection of a specific kind of activity record for
+ * a context.
+ *
+ * Disable collection of a specific kind of activity record for a context.
+ * The setting done by this API supersedes the global settings
+ * for activity records.
+ * Multiple kinds can be disabled by calling this function multiple times.
+ *
+ * \param context The context for which activity is to be disabled
+ * \param kind The kind of activity record to stop collecting
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_KIND if the activity kind is not supported
+ */
+CUptiResult CUPTIAPI cuptiActivityDisableContext(CUcontext context, CUpti_ActivityKind kind);
+
+/**
+ * \brief Get the number of activity records that were dropped because of
+ * insufficient buffer space.
+ *
+ * Get the number of records that were dropped because of insufficient
+ * buffer space. The dropped count includes records that could not be
+ * recorded because CUPTI did not have activity buffer space available
+ * for the record (because the CUpti_BuffersCallbackRequestFunc
+ * callback did not return an empty buffer of sufficient size) and
+ * also CDP records that could not be recorded because the device-side
+ * buffer was full (size is controlled by the
+ * CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP attribute).
+ * The dropped
+ * count maintained for the queue is reset to zero when this function
+ * is called.
+ *
+ * \param context The context, or NULL to get dropped count from global queue
+ * \param streamId The stream ID
+ * \param dropped The number of records that were dropped since the last call
+ * to this function.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p dropped is NULL
+ */
+CUptiResult CUPTIAPI cuptiActivityGetNumDroppedRecords(CUcontext context, uint32_t streamId,
+                                                       size_t *dropped);
+
+/**
+ * \brief Iterate over the activity records in a buffer.
+ *
+ * This is a helper function to iterate over the activity records in a
+ * buffer. A buffer of activity records is typically obtained by
+ * receiving a CUpti_BuffersCallbackCompleteFunc callback.
+ *
+ * An example of typical usage:
+ * \code
+ * CUpti_Activity *record = NULL;
+ * CUptiResult status = CUPTI_SUCCESS;
+ * do {
+ *    status = cuptiActivityGetNextRecord(buffer, validSize, &record);
+ *    if (status == CUPTI_SUCCESS) {
+ *      // Use record here...
+ *    }
+ *    else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED)
+ *      break;
+ *    else {
+ *      goto Error;
+ *    }
+ * } while (1);
+ * \endcode
+ *
+ * \param buffer The buffer containing activity records
+ * \param record Inputs the previous record returned by
+ * cuptiActivityGetNextRecord and returns the next activity record
+ * from the buffer. If the input value is NULL, returns the first activity
+ * record in the buffer. Records of kind CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL
+ * may contain invalid (0) timestamps, indicating that no timing information could
+ * be collected for lack of device memory.
+ * \param validBufferSizeBytes The number of valid bytes in the buffer.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_MAX_LIMIT_REACHED if no more records in the buffer
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p buffer is NULL.
+ */
+CUptiResult CUPTIAPI cuptiActivityGetNextRecord(uint8_t* buffer, size_t validBufferSizeBytes,
+                                                CUpti_Activity **record);
+
+/**
+ * \brief Function type for callback used by CUPTI to request an empty
+ * buffer for storing activity records.
+ *
+ * This callback function signals the CUPTI client that an activity
+ * buffer is needed by CUPTI. The activity buffer is used by CUPTI to
+ * store activity records. The callback function can decline the
+ * request by setting \p *buffer to NULL. In this case CUPTI may drop
+ * activity records.
+ *
+ * \param buffer Returns the new buffer. If set to NULL then no buffer
+ * is returned.
+ * \param size Returns the size of the returned buffer.
+ * \param maxNumRecords Returns the maximum number of records that
+ * should be placed in the buffer. If 0 then the buffer is filled with
+ * as many records as possible. If > 0 the buffer is filled with at
+ * most that many records before it is returned.
+ */
+typedef void (CUPTIAPI *CUpti_BuffersCallbackRequestFunc)(
+    uint8_t **buffer,
+    size_t *size,
+    size_t *maxNumRecords);
+
+/**
+ * \brief Function type for callback used by CUPTI to return a buffer
+ * of activity records.
+ *
+ * This callback function returns to the CUPTI client a buffer
+ * containing activity records. The buffer contains \p validSize
+ * bytes of activity records which should be read using
+ * cuptiActivityGetNextRecord. The number of dropped records can be
+ * read using cuptiActivityGetNumDroppedRecords.
+ * After this call, CUPTI
+ * relinquishes ownership of the buffer and will not use it
+ * anymore. The client may return the buffer to CUPTI using the
+ * CUpti_BuffersCallbackRequestFunc callback.
+ * Note: From CUDA 6.0 onwards, all buffers returned by this callback are
+ * global buffers, i.e. there is no context/stream specific buffer.
+ * The user needs to parse the global buffer to extract the context/stream
+ * specific activity records.
+ *
+ * \param context The context this buffer is associated with. If NULL, the
+ * buffer is associated with the global activities. This field is deprecated
+ * as of CUDA 6.0 and will always be NULL.
+ * \param streamId The stream id this buffer is associated with.
+ * This field is deprecated as of CUDA 6.0 and will always be 0.
+ * \param buffer The activity record buffer.
+ * \param size The total size of the buffer in bytes as set in
+ * CUpti_BuffersCallbackRequestFunc.
+ * \param validSize The number of valid bytes in the buffer.
+ */
+typedef void (CUPTIAPI *CUpti_BuffersCallbackCompleteFunc)(
+    CUcontext context,
+    uint32_t streamId,
+    uint8_t *buffer,
+    size_t size,
+    size_t validSize);
+
+/**
+ * \brief Registers callback functions with CUPTI for activity buffer
+ * handling.
+ *
+ * This function registers two callback functions to be used in asynchronous
+ * buffer handling. If registered, activity record buffers are handled using
+ * asynchronous requested/completed callbacks from CUPTI.
+ *
+ * Registering these callbacks prevents the client from using CUPTI's
+ * blocking enqueue/dequeue functions.
+ *
+ * \param funcBufferRequested callback which is invoked when an empty
+ * buffer is requested by CUPTI
+ * \param funcBufferCompleted callback which is invoked when a buffer
+ * containing activity records is available from CUPTI
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if either \p
+ * funcBufferRequested or \p funcBufferCompleted is NULL
+ */
+CUptiResult CUPTIAPI cuptiActivityRegisterCallbacks(CUpti_BuffersCallbackRequestFunc funcBufferRequested,
+                                                    CUpti_BuffersCallbackCompleteFunc funcBufferCompleted);
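+
+/*
+ * Editor's note -- usage sketch, not part of the original header: a minimal
+ * pair of buffer-handling callbacks and their registration. The 32 KB buffer
+ * size is arbitrary and purely illustrative; real tools typically use larger,
+ * suitably aligned buffers.
+ *
+ * \code
+ * static void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size,
+ *                                      size_t *maxNumRecords)
+ * {
+ *   *size = 32 * 1024;
+ *   *buffer = (uint8_t *) malloc(*size);   // NULL declines; CUPTI may then drop records
+ *   *maxNumRecords = 0;                    // 0: fill with as many records as fit
+ * }
+ *
+ * static void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId,
+ *                                      uint8_t *buffer, size_t size, size_t validSize)
+ * {
+ *   CUpti_Activity *record = NULL;
+ *   while (cuptiActivityGetNextRecord(buffer, validSize, &record) == CUPTI_SUCCESS) {
+ *     // dispatch on record->kind here
+ *   }
+ *   free(buffer);
+ * }
+ *
+ * // Register once, before enabling activity kinds.
+ * cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted);
+ * \endcode
+ */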
+
+/**
+ * \brief Wait for all activity records to be delivered via the
+ * completion callback.
+ *
+ * This function does not return until all activity records associated
+ * with the specified context/stream are returned to the CUPTI client
+ * using the callback registered in cuptiActivityRegisterCallbacks. To
+ * ensure that all activity records are complete, the requested
+ * stream(s), if any, are synchronized.
+ *
+ * If \p context is NULL, the global activity records (i.e. those not
+ * associated with a particular stream) are flushed (in this case no
+ * streams are synchronized). If \p context is a valid CUcontext and
+ * \p streamId is 0, the buffers of all streams of this context are
+ * flushed. Otherwise, the buffers of the specified stream in this
+ * context are flushed.
+ *
+ * Before calling this function, the buffer handling callback API
+ * must be activated by calling cuptiActivityRegisterCallbacks.
+ *
+ * \param context A valid CUcontext or NULL.
+ * \param streamId The stream ID.
+ * \param flag The flag can be set to indicate a forced flush. See CUpti_ActivityFlag
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_OPERATION if not preceded
+ * by a successful call to cuptiActivityRegisterCallbacks
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred
+ *
+ * **DEPRECATED** This method is deprecated.
+ * \p context and \p streamId will be ignored. Use cuptiActivityFlushAll
+ * to flush all data.
+ */
+CUptiResult CUPTIAPI cuptiActivityFlush(CUcontext context, uint32_t streamId, uint32_t flag);
+
+/**
+ * \brief Request to deliver activity records via the buffer completion callback.
+ *
+ * This function returns the activity records associated with all contexts/streams
+ * (and the global buffers not associated with any stream) to the CUPTI client
+ * using the callback registered in cuptiActivityRegisterCallbacks.
+ *
+ * This is a blocking call, but it doesn't issue any CUDA synchronization calls
+ * implicitly, thus it's not guaranteed that all activities are completed on the
+ * underlying devices. An activity record is considered complete if all of its
+ * information, including any timestamps, has been filled in. It is the client's
+ * responsibility to issue the necessary CUDA synchronization calls before calling
+ * this function if all activity records with complete information are expected
+ * to be delivered.
+ *
+ * Behavior of the function based on the input flag:
+ * - For the default flush, i.e. when \p flag is set to 0, it returns all the
+ *   activity buffers which have all of their activity records completed; the
+ *   buffers need not be full, though. It doesn't return buffers which have one or
+ *   more incomplete records. A default flush can be done at a regular interval in
+ *   a separate thread.
+ * - For a forced flush, i.e. when the flag CUPTI_ACTIVITY_FLAG_FLUSH_FORCED is passed
+ *   to the function, it returns all the activity buffers, including the ones which have
+ *   one or more incomplete activity records. Clients are advised to do a forced
+ *   flush before the termination of the profiling session to allow the remaining
+ *   buffers to be delivered. In general, this can be done in an at-exit handler.
+ *
+ * Before calling this function, the buffer handling callback API must be activated
+ * by calling cuptiActivityRegisterCallbacks.
+ *
+ * \param flag The flag can be set to indicate a forced flush. See CUpti_ActivityFlag
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_OPERATION if not preceded by a
+ * successful call to cuptiActivityRegisterCallbacks
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred
+ *
+ * \see cuptiActivityFlushPeriod
+ */
+CUptiResult CUPTIAPI cuptiActivityFlushAll(uint32_t flag);
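+
+/*
+ * Editor's note -- usage sketch, not part of the original header: a forced
+ * flush at the end of the profiling session, after synchronizing the device,
+ * delivers whatever records remain buffered, including incomplete ones.
+ *
+ * \code
+ * cudaDeviceSynchronize();   // complete outstanding GPU work first
+ * cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED);
+ * \endcode
+ */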
+ */
+CUptiResult CUPTIAPI cuptiActivityGetAttribute(CUpti_ActivityAttribute attr,
+                                               size_t *valueSize, void* value);
+
+/**
+ * \brief Write an activity API attribute.
+ *
+ * Write an activity API attribute.
+ *
+ * \param attr The attribute to write
+ * \param valueSize The size, in bytes, of the value
+ * \param value The attribute value to write
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value is NULL, or
+ * if \p attr is not an activity attribute
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT Indicates that
+ * the \p value buffer is too small to hold the attribute value.
+ */
+CUptiResult CUPTIAPI cuptiActivitySetAttribute(CUpti_ActivityAttribute attr,
+                                               size_t *valueSize, void* value);
+
+
+/**
+ * \brief Set the Unified Memory counter configuration.
+ *
+ * \param config A pointer to \ref CUpti_ActivityUnifiedMemoryCounterConfig structures
+ * containing the Unified Memory counter configuration.
+ * \param count Number of Unified Memory counter configuration structures
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p config is NULL or
+ * any parameter in the \p config structures is not a valid value
+ * \retval CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED One potential reason is that
+ * the platform (OS/arch) does not support the Unified Memory counters
+ * \retval CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE Indicates that the device
+ * does not support the Unified Memory counters
+ * \retval CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES Indicates that
+ * a multi-GPU configuration without P2P support between every pair of devices
+ * does not support the Unified Memory counters
+ */
+CUptiResult CUPTIAPI cuptiActivityConfigureUnifiedMemoryCounter(CUpti_ActivityUnifiedMemoryCounterConfig *config, uint32_t count);
+
+/**
+ * \brief Get the auto boost state.
+ *
+ * The profiling results can be inconsistent if auto boost is enabled.
+ * CUPTI tries to disable auto boost while profiling. It can fail to do so
+ * when the user does not have the required permissions or the CUDA_AUTO_BOOST
+ * environment variable is set. This function can be used to query whether auto
+ * boost is enabled.
+ *
+ * \param context A valid CUcontext.
+ * \param state A pointer to a \ref CUpti_ActivityAutoBoostState structure which
+ * contains the current state and the id of the process that has requested the
+ * current state
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p context or \p state is NULL
+ * \retval CUPTI_ERROR_NOT_SUPPORTED Indicates that the device does not support auto boost
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred
+ */
+CUptiResult CUPTIAPI cuptiGetAutoBoostState(CUcontext context, CUpti_ActivityAutoBoostState *state);
+
+/**
+ * \brief Set the PC sampling configuration.
+ *
+ * For Pascal and older GPU architectures this API must be called before enabling
+ * the activity kind CUPTI_ACTIVITY_KIND_PC_SAMPLING. There is no such requirement
+ * for Volta and newer GPU architectures.
+ *
+ * For Volta and newer GPU architectures, if this API is called in the middle of
+ * execution, the PC sampling configuration will be updated for subsequent kernel launches.
+ *
+ * \param ctx The context
+ * \param config A pointer to a \ref CUpti_ActivityPCSamplingConfig structure
+ * containing the PC sampling configuration.
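+ *
+ * A minimal sketch (the field names follow the CUpti_ActivityPCSamplingConfig
+ * definition elsewhere in this header, and the samplingPeriod2 handling is an
+ * assumption; \p ctx is a CUcontext owned by the client):
+ * \code
+  CUpti_ActivityPCSamplingConfig config;
+  config.size = sizeof(config);
+  // Request a low sampling frequency to keep the profiling overhead down.
+  config.samplingPeriod = CUPTI_ACTIVITY_PC_SAMPLING_PERIOD_LOW;
+  config.samplingPeriod2 = 0;   // assumed: 0 defers to the enum-based period above
+  cuptiActivityConfigurePCSampling(ctx, &config);
+ \endcode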
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called while
+ * some valid event collection method is set.
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p config is NULL or
+ * any parameter in the \p config structures is not a valid value
+ * \retval CUPTI_ERROR_NOT_SUPPORTED Indicates that the system/device
+ * does not support PC sampling
+ */
+CUptiResult CUPTIAPI cuptiActivityConfigurePCSampling(CUcontext ctx, CUpti_ActivityPCSamplingConfig *config);
+
+/**
+ * \brief Returns the last error from a CUPTI call or callback.
+ *
+ * Returns the last error that has been produced by any of the CUPTI API calls
+ * or callbacks in the same host thread, and resets it to CUPTI_SUCCESS.
+ */
+CUptiResult CUPTIAPI cuptiGetLastError(void);
+
+/**
+ * \brief Set the thread-id type.
+ *
+ * CUPTI uses the method corresponding to the set type to generate the thread-id.
+ * See the enum \ref CUpti_ActivityThreadIdType for the list of methods.
+ * All activity records that carry a thread-id field will contain values
+ * generated by the selected method.
+ * The thread-id type must not be changed during the profiling session, to
+ * avoid thread-id value mismatches across activity records.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_SUPPORTED if \p type is not supported on the platform
+ */
+CUptiResult CUPTIAPI cuptiSetThreadIdType(CUpti_ActivityThreadIdType type);
+
+/**
+ * \brief Get the thread-id type.
+ *
+ * Returns the thread-id type used in CUPTI.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p type is NULL
+ */
+CUptiResult CUPTIAPI cuptiGetThreadIdType(CUpti_ActivityThreadIdType *type);
+
+/**
+* \brief Check support for a compute capability.
+*
+* This function is used to check the support for a device based on
+* its compute capability. It sets \p support when the compute
+* capability is supported by the current version of CUPTI, and clears
+* it otherwise. This version of CUPTI might not support all GPUs sharing
+* the same compute capability. It is suggested to use the API \ref
+* cuptiDeviceSupported, which provides the correct information.
+*
+* \param major The major revision number of the compute capability
+* \param minor The minor revision number of the compute capability
+* \param support Pointer to an integer to return the support status
+*
+* \retval CUPTI_SUCCESS
+* \retval CUPTI_ERROR_INVALID_PARAMETER if \p support is NULL
+*
+* \sa ::cuptiDeviceSupported
+*/
+CUptiResult CUPTIAPI cuptiComputeCapabilitySupported(int major, int minor, int *support);
+
+/**
+* \brief Check support for a compute device.
+*
+* This function is used to check the support for a compute device.
+* It sets \p support when the device is supported by the current
+* version of CUPTI, and clears it otherwise.
+*
+* \param dev The device handle returned by the CUDA Driver API cuDeviceGet
+* \param support Pointer to an integer to return the support status
+*
+* \retval CUPTI_SUCCESS
+* \retval CUPTI_ERROR_INVALID_PARAMETER if \p support is NULL
+* \retval CUPTI_ERROR_INVALID_DEVICE if \p dev is not a valid device
+*
+* \sa ::cuptiComputeCapabilitySupported
+*/
+CUptiResult CUPTIAPI cuptiDeviceSupported(CUdevice dev, int *support);
+
+/**
+ * This indicates the virtualization mode in which the CUDA device is running.
+ */
+typedef enum {
+  /**
+   * No virtualization mode is associated with the device,
+   * i.e. it's a bare-metal GPU.
+   */
+  CUPTI_DEVICE_VIRTUALIZATION_MODE_NONE = 0,
+  /**
+   * The device is associated with the pass-through GPU.
+   * In this mode, an entire physical GPU is directly assigned
+   * to one virtual machine (VM).
+   */
+  CUPTI_DEVICE_VIRTUALIZATION_MODE_PASS_THROUGH = 1,
+  /**
+   * The device is associated with the virtual GPU (vGPU).
+   * In this mode multiple virtual machines (VMs) have simultaneous,
+   * direct access to a single physical GPU.
+   */
+  CUPTI_DEVICE_VIRTUALIZATION_MODE_VIRTUAL_GPU = 2,
+
+  CUPTI_DEVICE_VIRTUALIZATION_MODE_FORCE_INT = 0x7fffffff
+} CUpti_DeviceVirtualizationMode;
+
+/**
+ * \brief Query the virtualization mode of the device.
+ *
+ * This function is used to query the virtualization mode of the CUDA device.
+ *
+ * \param dev The device handle returned by the CUDA Driver API cuDeviceGet
+ * \param mode Pointer to a CUpti_DeviceVirtualizationMode to return the virtualization mode
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_DEVICE if \p dev is not a valid device
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p mode is NULL
+ *
+ */
+CUptiResult CUPTIAPI cuptiDeviceVirtualizationMode(CUdevice dev, CUpti_DeviceVirtualizationMode *mode);
+
+/**
+ * \brief Detach CUPTI from the running process.
+ *
+ * This API detaches CUPTI from the running process. It destroys and cleans up all the
+ * resources associated with CUPTI in the current process. After CUPTI detaches from the process,
+ * the process will keep on running with no CUPTI attached to it.
+ * For safe operation of the API, it is recommended that this API be invoked from the exit
+ * callback site of a CUDA Driver or Runtime API function. Otherwise the CUPTI client needs to
+ * make sure that the required CUDA synchronization and CUPTI activity buffer flush are done
+ * before calling the API.
+ * Sample code showing the usage of the API in the CUPTI callback handler code:
+ * \code
+  void CUPTIAPI
+  cuptiCallbackHandler(void *userdata, CUpti_CallbackDomain domain,
+                       CUpti_CallbackId cbid, void *cbdata)
+  {
+    const CUpti_CallbackData *cbInfo = (CUpti_CallbackData *)cbdata;
+
+    // Take this code path when CUPTI detach is requested
+    // (detachCupti is a client-defined flag set when detach is desired)
+    if (detachCupti) {
+      switch(domain)
+      {
+        case CUPTI_CB_DOMAIN_RUNTIME_API:
+        case CUPTI_CB_DOMAIN_DRIVER_API:
+          if (cbInfo->callbackSite == CUPTI_API_EXIT) {
+            // call the CUPTI detach API
+            cuptiFinalize();
+          }
+          break;
+        default:
+          break;
+      }
+    }
+  }
+ \endcode
+ */
+CUptiResult CUPTIAPI cuptiFinalize(void);
+
+/**
+ * \brief Push an external correlation id for the calling thread.
+ *
+ * This function notifies CUPTI that the calling thread is entering an external API region.
+ * When a CUPTI activity API record is created while within an external API region and
+ * CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION is enabled, the activity API record will
+ * be preceded by a CUpti_ActivityExternalCorrelation record for each \ref CUpti_ExternalCorrelationKind.
+ *
+ * \param kind The kind of external API the activities should be correlated with.
+ * \param id The external correlation id.
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER The external API kind is invalid
+ */
+CUptiResult CUPTIAPI cuptiActivityPushExternalCorrelationId(CUpti_ExternalCorrelationKind kind, uint64_t id);
+
+/**
+ * \brief Pop an external correlation id for the calling thread.
+ *
+ * This function notifies CUPTI that the calling thread is leaving an external API region.
+ *
+ * \param kind The kind of external API the activities should be correlated with.
+ * \param lastId If the function returns successfully, contains the last external correlation id for this \p kind; may be NULL.
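+ *
+ * A typical push/pop pairing looks like this (a minimal sketch; the id value
+ * and the enclosed CUDA work are placeholders, and it assumes the
+ * CUPTI_EXTERNAL_CORRELATION_KIND_UNKNOWN enumerator of
+ * CUpti_ExternalCorrelationKind):
+ * \code
+  uint64_t myId = 42;   // hypothetical id from the client's own tracing system
+  cuptiActivityPushExternalCorrelationId(CUPTI_EXTERNAL_CORRELATION_KIND_UNKNOWN, myId);
+  // ... issue CUDA work here; its records get external-correlation records ...
+  uint64_t lastId = 0;
+  cuptiActivityPopExternalCorrelationId(CUPTI_EXTERNAL_CORRELATION_KIND_UNKNOWN, &lastId);
+ \endcode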
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER The external API kind is invalid.
+ * \retval CUPTI_ERROR_QUEUE_EMPTY No external id is currently associated with \p kind.
+ */
+CUptiResult CUPTIAPI cuptiActivityPopExternalCorrelationId(CUpti_ExternalCorrelationKind kind, uint64_t *lastId);
+
+/**
+ * \brief Controls the collection of queued and submitted timestamps for kernels.
+ *
+ * This API is used to control the collection of queued and submitted timestamps
+ * for kernels whose records are provided through the struct \ref CUpti_ActivityKernel9.
+ * The default value is 0, i.e. these timestamps are not collected. This API needs
+ * to be called before the initialization of CUDA, and this setting should not be
+ * changed during the profiling session.
+ *
+ * \param enable A boolean denoting whether these timestamps should be
+ * collected
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ */
+CUptiResult CUPTIAPI cuptiActivityEnableLatencyTimestamps(uint8_t enable);
+
+/**
+ * \brief Sets the flush period for the worker thread.
+ *
+ * CUPTI creates a worker thread to minimize the perturbation of the application's
+ * threads. CUPTI offloads certain operations from the application threads to the worker
+ * thread; these include synchronization of profiling resources between host and device and
+ * delivery of the activity buffers to the client using the callback registered in
+ * cuptiActivityRegisterCallbacks. For performance reasons, CUPTI wakes up the worker
+ * thread based on certain heuristics.
+ *
+ * This API is used to control the flush period of the worker thread. This setting
+ * overrides the CUPTI heuristics. Setting the time to zero disables the periodic flush and
+ * restores the default behavior.
+ *
+ * A periodic flush can return only those activity buffers which are full and have all
+ * their activity records completed.
+ *
+ * The API \ref cuptiActivityFlushAll can still be used to flush the data on demand, even
+ * when the client sets a periodic flush.
+ *
+ * \param time The flush period in milliseconds
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ *
+ * \see cuptiActivityFlushAll
+ */
+CUptiResult CUPTIAPI cuptiActivityFlushPeriod(uint32_t time);
+
+/**
+ * \brief Controls the collection of launch attributes for kernels.
+ *
+ * This API is used to control the collection of launch attributes for kernels whose
+ * records are provided through the struct \ref CUpti_ActivityKernel9.
+ * The default value is 0, i.e. these attributes are not collected.
+ *
+ * \param enable A boolean denoting whether these launch attributes should be collected
+ */
+CUptiResult CUPTIAPI cuptiActivityEnableLaunchAttributes(uint8_t enable);
+
+/**
+ * \brief Function type for the callback used by CUPTI to request a timestamp
+ * to be used in activity records.
+ *
+ * This callback function signals the CUPTI client that a timestamp needs
+ * to be returned. This timestamp is treated as a normalized timestamp
+ * to be used for various purposes in CUPTI, for example to store the start and
+ * end timestamps reported in the CUPTI activity records.
+ * The returned timestamp must be in nanoseconds.
+ *
+ * \sa ::cuptiActivityRegisterTimestampCallback
+ */
+typedef uint64_t (CUPTIAPI *CUpti_TimestampCallbackFunc)(void);
+
+/**
+ * \brief Registers a callback function with CUPTI for providing timestamps.
+ *
+ * This function registers a callback function to obtain a timestamp of the user's
+ * choice instead of using the CUPTI-provided timestamp.
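+ *
+ * A monotonic-clock callback could look like this (a minimal sketch; the
+ * function name myTimestampCallback and the choice of clock are placeholders):
+ * \code
+  #include <time.h>
+
+  static uint64_t CUPTIAPI myTimestampCallback(void)
+  {
+    struct timespec ts;
+    clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+    // CUPTI expects the timestamp in nanoseconds.
+    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
+  }
+
+  cuptiActivityRegisterTimestampCallback(myTimestampCallback);
+ \endcode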
+ *
+ * By default CUPTI uses a different method, based on the underlying platform,
+ * to retrieve the timestamp:
+ *   - Linux and Android use clock_gettime(CLOCK_REALTIME, ..)
+ *   - Windows uses QueryPerformanceCounter()
+ *   - Mac uses mach_absolute_time()
+ *   - QNX uses ClockCycles()
+ * Timestamps retrieved using these methods are converted to nanoseconds if needed
+ * before usage.
+ *
+ * The registration of the timestamp callback should be done before any of the CUPTI
+ * activity kinds are enabled, to make sure that all the records report timestamps using
+ * the callback function registered through the cuptiActivityRegisterTimestampCallback API.
+ *
+ * Changing the timestamp callback function in CUPTI through the
+ * cuptiActivityRegisterTimestampCallback API in the middle of the profiling
+ * session can cause records generated prior to the change to report
+ * timestamps through the previous timestamp method.
+ *
+ * \param funcTimestamp The callback which is invoked when a timestamp is
+ * needed by CUPTI
+ *
+ * \retval CUPTI_SUCCESS
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p funcTimestamp is NULL
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
+ */
+CUptiResult CUPTIAPI cuptiActivityRegisterTimestampCallback(CUpti_TimestampCallbackFunc funcTimestamp);
+
+/** @} */ /* END CUPTI_ACTIVITY_API */
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+  #pragma GCC visibility pop
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /*_CUPTI_ACTIVITY_H_*/
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h
new file mode 100644
index 0000000000000000000000000000000000000000..147f4c47b7281a154b1353065617df938d575f25
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h
@@ -0,0 +1,762 @@
+/*
+ * Copyright 2010-2020 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUPTI_CALLBACKS_H__)
+#define __CUPTI_CALLBACKS_H__
+
+#include <cuda.h>
+#include <builtin_types.h>
+#include <string.h>
+#include <cuda_stdint.h>
+#include <cupti_result.h>
+
+#ifndef CUPTIAPI
+#ifdef _WIN32
+#define CUPTIAPI __stdcall
+#else
+#define CUPTIAPI
+#endif
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+  #pragma GCC visibility push(default)
+#endif
+
+/**
+ * \defgroup CUPTI_CALLBACK_API CUPTI Callback API
+ * Functions, types, and enums that implement the CUPTI Callback API.
+ * @{
+ */
+
+/**
+ * \brief Specifies the point in an API call that a callback is issued.
+ *
+ * Specifies the point in an API call that a callback is issued. This
+ * value is communicated to the callback function via \ref
+ * CUpti_CallbackData::callbackSite.
+ */
+typedef enum {
+  /**
+   * The callback is at the entry of the API call.
+   */
+  CUPTI_API_ENTER = 0,
+  /**
+   * The callback is at the exit of the API call.
+   */
+  CUPTI_API_EXIT = 1,
+  CUPTI_API_CBSITE_FORCE_INT = 0x7fffffff
+} CUpti_ApiCallbackSite;
+
+/**
+ * \brief Callback domains.
+ *
+ * Callback domains. Each domain represents callback points for a
+ * group of related API functions or CUDA driver activity.
+ */
+typedef enum {
+  /**
+   * Invalid domain.
+   */
+  CUPTI_CB_DOMAIN_INVALID = 0,
+  /**
+   * Domain containing callback points for all driver API functions.
+   */
+  CUPTI_CB_DOMAIN_DRIVER_API = 1,
+  /**
+   * Domain containing callback points for all runtime API
+   * functions.
+   */
+  CUPTI_CB_DOMAIN_RUNTIME_API = 2,
+  /**
+   * Domain containing callback points for CUDA resource tracking.
+   */
+  CUPTI_CB_DOMAIN_RESOURCE = 3,
+  /**
+   * Domain containing callback points for CUDA synchronization.
+   */
+  CUPTI_CB_DOMAIN_SYNCHRONIZE = 4,
+  /**
+   * Domain containing callback points for NVTX API functions.
+   */
+  CUPTI_CB_DOMAIN_NVTX = 5,
+  CUPTI_CB_DOMAIN_SIZE,
+
+  CUPTI_CB_DOMAIN_FORCE_INT = 0x7fffffff
+} CUpti_CallbackDomain;
+
+/**
+ * \brief Callback IDs for the resource domain.
+ *
+ * Callback IDs for the resource domain, CUPTI_CB_DOMAIN_RESOURCE. This
+ * value is communicated to the callback function via the \p cbid
+ * parameter.
+ */
+typedef enum {
+  /**
+   * Invalid resource callback ID.
+   */
+  CUPTI_CBID_RESOURCE_INVALID = 0,
+  /**
+   * A new context has been created.
+ */ + CUPTI_CBID_RESOURCE_CONTEXT_CREATED = 1, + /** + * A context is about to be destroyed. + */ + CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING = 2, + /** + * A new stream has been created. + */ + CUPTI_CBID_RESOURCE_STREAM_CREATED = 3, + /** + * A stream is about to be destroyed. + */ + CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING = 4, + /** + * The driver has finished initializing. + */ + CUPTI_CBID_RESOURCE_CU_INIT_FINISHED = 5, + /** + * A module has been loaded. + */ + CUPTI_CBID_RESOURCE_MODULE_LOADED = 6, + /** + * A module is about to be unloaded. + */ + CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING = 7, + /** + * The current module which is being profiled. + */ + CUPTI_CBID_RESOURCE_MODULE_PROFILED = 8, + /** + * CUDA graph has been created. + */ + CUPTI_CBID_RESOURCE_GRAPH_CREATED = 9, + /** + * CUDA graph is about to be destroyed. + */ + CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING = 10, + /** + * CUDA graph is cloned. + */ + CUPTI_CBID_RESOURCE_GRAPH_CLONED = 11, + /** + * CUDA graph node is about to be created + */ + CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING = 12, + /** + * CUDA graph node is created. + */ + CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED = 13, + /** + * CUDA graph node is about to be destroyed. + */ + CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING = 14, + /** + * Dependency on a CUDA graph node is created. + */ + CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED = 15, + /** + * Dependency on a CUDA graph node is destroyed. + */ + CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING = 16, + /** + * An executable CUDA graph is about to be created. + */ + CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING = 17, + /** + * An executable CUDA graph is created. + */ + CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED = 18, + /** + * An executable CUDA graph is about to be destroyed. + */ + CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING = 19, + /** + * CUDA graph node is cloned. + */ + CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED = 20, + + CUPTI_CBID_RESOURCE_SIZE, + CUPTI_CBID_RESOURCE_FORCE_INT = 0x7fffffff +} CUpti_CallbackIdResource; + +/** + * \brief Callback IDs for synchronization domain. + * + * Callback IDs for synchronization domain, + * CUPTI_CB_DOMAIN_SYNCHRONIZE. This value is communicated to the + * callback function via the \p cbid parameter. + */ +typedef enum { + /** + * Invalid synchronize callback ID. + */ + CUPTI_CBID_SYNCHRONIZE_INVALID = 0, + /** + * Stream synchronization has completed for the stream. + */ + CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED = 1, + /** + * Context synchronization has completed for the context. + */ + CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED = 2, + CUPTI_CBID_SYNCHRONIZE_SIZE, + CUPTI_CBID_SYNCHRONIZE_FORCE_INT = 0x7fffffff +} CUpti_CallbackIdSync; + + +/** + * \brief Data passed into a runtime or driver API callback function. + * + * Data passed into a runtime or driver API callback function as the + * \p cbdata argument to \ref CUpti_CallbackFunc. The \p cbdata will + * be this type for \p domain equal to CUPTI_CB_DOMAIN_DRIVER_API or + * CUPTI_CB_DOMAIN_RUNTIME_API. The callback data is valid only within + * the invocation of the callback function that is passed the data. If + * you need to retain some data for use outside of the callback, you + * must make a copy of that data. For example, if you make a shallow + * copy of CUpti_CallbackData within a callback, you cannot + * dereference \p functionParams outside of that callback to access + * the function parameters. 
\p functionName is an exception: the
+ * string pointed to by \p functionName is a global constant and so
+ * may be accessed outside of the callback.
+ */
+typedef struct {
+  /**
+   * Point in the runtime or driver function from where the callback
+   * was issued.
+   */
+  CUpti_ApiCallbackSite callbackSite;
+
+  /**
+   * Name of the runtime or driver API function which issued the
+   * callback. This string is a global constant and so may be
+   * accessed outside of the callback.
+   */
+  const char *functionName;
+
+  /**
+   * Pointer to the arguments passed to the runtime or driver API
+   * call. See generated_cuda_runtime_api_meta.h and
+   * generated_cuda_meta.h for structure definitions for the
+   * parameters for each runtime and driver API function.
+   */
+  const void *functionParams;
+
+  /**
+   * Pointer to the return value of the runtime or driver API
+   * call. This field is only valid within the exit (::CUPTI_API_EXIT)
+   * callback. For a runtime API \p functionReturnValue points to a
+   * \p cudaError_t. For a driver API \p functionReturnValue points
+   * to a \p CUresult.
+   */
+  void *functionReturnValue;
+
+  /**
+   * Name of the symbol operated on by the runtime or driver API
+   * function which issued the callback. This entry is valid only for
+   * driver and runtime launch callbacks, where it returns the name of
+   * the kernel.
+   */
+  const char *symbolName;
+
+  /**
+   * Driver context current to the thread, or null if no context is
+   * current. This value can change from the entry to exit callback
+   * of a runtime API function if the runtime initializes a context.
+   */
+  CUcontext context;
+
+  /**
+   * Unique ID for the CUDA context associated with the thread. The
+   * UIDs are assigned sequentially as contexts are created and are
+   * unique within a process.
+   */
+  uint32_t contextUid;
+
+  /**
+   * Pointer to data shared between the entry and exit callbacks of
+   * a given runtime or driver API function invocation. This field
+   * can be used to pass 64-bit values from the entry callback to
+   * the corresponding exit callback.
+   */
+  uint64_t *correlationData;
+
+  /**
+   * The activity record correlation ID for this callback. For a
+   * driver domain callback (i.e. \p domain
+   * CUPTI_CB_DOMAIN_DRIVER_API) this ID will equal the correlation ID
+   * in the CUpti_ActivityAPI record corresponding to the CUDA driver
+   * function call. For a runtime domain callback (i.e. \p domain
+   * CUPTI_CB_DOMAIN_RUNTIME_API) this ID will equal the correlation
+   * ID in the CUpti_ActivityAPI record corresponding to the CUDA
+   * runtime function call. Within the callback, this ID can be
+   * recorded to correlate user data with the activity record. This
+   * field is new in 4.1.
+   */
+  uint32_t correlationId;
+
+} CUpti_CallbackData;
+
+/**
+ * \brief Data passed into a resource callback function.
+ *
+ * Data passed into a resource callback function as the \p cbdata
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The callback
+ * data is valid only within the invocation of the callback function
+ * that is passed the data. If you need to retain some data for use
+ * outside of the callback, you must make a copy of that data.
+ */
+typedef struct {
+  /**
+   * For CUPTI_CBID_RESOURCE_CONTEXT_CREATED and
+   * CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING, the context being
+   * created or destroyed. For CUPTI_CBID_RESOURCE_STREAM_CREATED and
+   * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the context
+   * containing the stream being created or destroyed.
+   */
+  CUcontext context;
+
+  union {
+    /**
+     * For CUPTI_CBID_RESOURCE_STREAM_CREATED and
+     * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the stream being
+     * created or destroyed.
+     */
+    CUstream stream;
+  } resourceHandle;
+
+  /**
+   * Reserved for future use.
+   */
+  void *resourceDescriptor;
+} CUpti_ResourceData;
+
+
+/**
+ * \brief Module data passed into a resource callback function.
+ *
+ * CUDA module data passed into a resource callback function as the \p cbdata
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The module
+ * data is valid only within the invocation of the callback function
+ * that is passed the data. If you need to retain some data for use
+ * outside of the callback, you must make a copy of that data.
+ */
+
+typedef struct {
+  /**
+   * Identifier to associate with the CUDA module.
+   */
+  uint32_t moduleId;
+
+  /**
+   * The size of the cubin.
+   */
+  size_t cubinSize;
+
+  /**
+   * Pointer to the associated cubin.
+   */
+  const char *pCubin;
+} CUpti_ModuleResourceData;
+
+/**
+ * \brief CUDA graphs data passed into a resource callback function.
+ *
+ * CUDA graphs data passed into a resource callback function as the \p cbdata
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The graph
+ * data is valid only within the invocation of the callback function
+ * that is passed the data. If you need to retain some data for use
+ * outside of the callback, you must make a copy of that data.
+ */
+
+typedef struct {
+  /**
+   * CUDA graph
+   */
+  CUgraph graph;
+  /**
+   * The original CUDA graph from which \p graph is cloned
+   */
+  CUgraph originalGraph;
+  /**
+   * CUDA graph node
+   */
+  CUgraphNode node;
+  /**
+   * The original CUDA graph node from which \p node is cloned
+   */
+  CUgraphNode originalNode;
+  /**
+   * Type of \p node
+   */
+  CUgraphNodeType nodeType;
+  /**
+   * The dependent graph node
+   */
+  CUgraphNode dependency;
+  /**
+   * CUDA executable graph
+   */
+  CUgraphExec graphExec;
+} CUpti_GraphData;
+
+/**
+ * \brief Data passed into a synchronize callback function.
+ *
+ * Data passed into a synchronize callback function as the \p cbdata
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
+ * type for \p domain equal to CUPTI_CB_DOMAIN_SYNCHRONIZE. The
+ * callback data is valid only within the invocation of the callback
+ * function that is passed the data. If you need to retain some data
+ * for use outside of the callback, you must make a copy of that data.
+ */
+typedef struct {
+  /**
+   * The context of the stream being synchronized.
+   */
+  CUcontext context;
+  /**
+   * The stream being synchronized.
+   */
+  CUstream stream;
+} CUpti_SynchronizeData;
+
+/**
+ * \brief Data passed into an NVTX callback function.
+ *
+ * Data passed into an NVTX callback function as the \p cbdata argument
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
+ * domain equal to CUPTI_CB_DOMAIN_NVTX. Unless otherwise noted, the
+ * callback data is valid only within the invocation of the callback
+ * function that is passed the data. If you need to retain some data
+ * for use outside of the callback, you must make a copy of that data.
+ */
+typedef struct {
+  /**
+   * Name of the NVTX API function which issued the callback. This
+   * string is a global constant and so may be accessed outside of the
+   * callback.
+ */ + const char *functionName; + + /** + * Pointer to the arguments passed to the NVTX API call. See + * generated_nvtx_meta.h for structure definitions for the + * parameters for each NVTX API function. + */ + const void *functionParams; + + /** + * Pointer to the return value of the NVTX API call. See + * nvToolsExt.h for each NVTX API function's return value. + */ + const void *functionReturnValue; +} CUpti_NvtxData; + +/** + * \brief An ID for a driver API, runtime API, resource or + * synchronization callback. + * + * An ID for a driver API, runtime API, resource or synchronization + * callback. Within a driver API callback this should be interpreted + * as a CUpti_driver_api_trace_cbid value (these values are defined in + * cupti_driver_cbid.h). Within a runtime API callback this should be + * interpreted as a CUpti_runtime_api_trace_cbid value (these values + * are defined in cupti_runtime_cbid.h). Within a resource API + * callback this should be interpreted as a \ref + * CUpti_CallbackIdResource value. Within a synchronize API callback + * this should be interpreted as a \ref CUpti_CallbackIdSync value. + */ +typedef uint32_t CUpti_CallbackId; + +/** + * \brief Function type for a callback. + * + * Function type for a callback. The type of the data passed to the + * callback in \p cbdata depends on the \p domain. If \p domain is + * CUPTI_CB_DOMAIN_DRIVER_API or CUPTI_CB_DOMAIN_RUNTIME_API the type + * of \p cbdata will be CUpti_CallbackData. If \p domain is + * CUPTI_CB_DOMAIN_RESOURCE the type of \p cbdata will be + * CUpti_ResourceData. If \p domain is CUPTI_CB_DOMAIN_SYNCHRONIZE the + * type of \p cbdata will be CUpti_SynchronizeData. If \p domain is + * CUPTI_CB_DOMAIN_NVTX the type of \p cbdata will be CUpti_NvtxData. + * + * \param userdata User data supplied at subscription of the callback + * \param domain The domain of the callback + * \param cbid The ID of the callback + * \param cbdata Data passed to the callback. + */ +typedef void (CUPTIAPI *CUpti_CallbackFunc)( + void *userdata, + CUpti_CallbackDomain domain, + CUpti_CallbackId cbid, + const void *cbdata); + +/** + * \brief A callback subscriber. + */ +typedef struct CUpti_Subscriber_st *CUpti_SubscriberHandle; + +/** + * \brief Pointer to an array of callback domains. + */ +typedef CUpti_CallbackDomain *CUpti_DomainTable; + +/** + * \brief Get the available callback domains. + * + * Returns in \p *domainTable an array of size \p *domainCount of all + * the available callback domains. + * \note \b Thread-safety: this function is thread safe. + * + * \param domainCount Returns number of callback domains + * \param domainTable Returns pointer to array of available callback domains + * + * \retval CUPTI_SUCCESS on success + * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p domainCount or \p domainTable are NULL + */ +CUptiResult CUPTIAPI cuptiSupportedDomains(size_t *domainCount, + CUpti_DomainTable *domainTable); + +/** + * \brief Initialize a callback subscriber with a callback function + * and user data. + * + * Initializes a callback subscriber with a callback function and + * (optionally) a pointer to user data. The returned subscriber handle + * can be used to enable and disable the callback for specific domains + * and callback IDs. + * \note Only a single subscriber can be registered at a time. 
To ensure
+ * that no other CUPTI client interrupts the profiling session, it's the
+ * responsibility of all the CUPTI clients to call this function before
+ * starting the profiling session. If a profiling session has already been
+ * started by another CUPTI client, this function returns the error code
+ * CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED.
+ * Note that this function returns the same error when the application is
+ * launched using NVIDIA tools like nvprof, Visual Profiler, Nsight Systems,
+ * Nsight Compute, cuda-gdb and cuda-memcheck.
+ * \note This function does not enable any callbacks.
+ * \note \b Thread-safety: this function is thread safe.
+ *
+ * \param subscriber Returns a handle to the initialized subscriber
+ * \param callback The callback function
+ * \param userdata A pointer to user data. This data will be passed to
+ * the callback function via the \p userdata parameter.
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
+ * \retval CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED if there is already a CUPTI subscriber
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL
+ */
+CUptiResult CUPTIAPI cuptiSubscribe(CUpti_SubscriberHandle *subscriber,
+                                    CUpti_CallbackFunc callback,
+                                    void *userdata);
+
+/**
+ * \brief Unregister a callback subscriber.
+ *
+ * Removes a callback subscriber so that no future callbacks will be
+ * issued to that subscriber.
+ * \note \b Thread-safety: this function is thread safe.
+ *
+ * \param subscriber Handle to the initialized subscriber
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL or not initialized
+ */
+CUptiResult CUPTIAPI cuptiUnsubscribe(CUpti_SubscriberHandle subscriber);
+
+/**
+ * \brief Get the current enabled/disabled state of a callback for a specific
+ * domain and function ID.
+ *
+ * Returns non-zero in \p *enable if the callback for a domain and
+ * callback ID is enabled, and zero if not enabled.
+ *
+ * \note \b Thread-safety: a subscriber must serialize access to
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
+ * the results are undefined.
+ *
+ * \param enable Returns non-zero if callback enabled, zero if not enabled
+ * \param subscriber Handle to the initialized subscriber
+ * \param domain The domain of the callback
+ * \param cbid The ID of the callback
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p enable is NULL, or if \p
+ * subscriber, \p domain or \p cbid is invalid.
+ */
+CUptiResult CUPTIAPI cuptiGetCallbackState(uint32_t *enable,
+                                           CUpti_SubscriberHandle subscriber,
+                                           CUpti_CallbackDomain domain,
+                                           CUpti_CallbackId cbid);
+
+/**
+ * \brief Enable or disable callbacks for a specific domain and
+ * callback ID.
+ *
+ * Enable or disable callbacks for a subscriber for a specific domain
+ * and callback ID.
+ *
+ * \note \b Thread-safety: a subscriber must serialize access to
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
+ * the results are undefined.
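+ *
+ * A typical subscribe-then-enable sequence (a minimal sketch; myCallback is a
+ * hypothetical client-defined CUpti_CallbackFunc):
+ * \code
+  CUpti_SubscriberHandle subscriber;
+  cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)myCallback, NULL);
+  // Receive a callback at the entry and exit of every cuLaunchKernel call.
+  cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_DRIVER_API,
+                      CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
+ \endcode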
+ *
+ * \param enable New enable state for the callback. Zero disables the
+ * callback, non-zero enables the callback.
+ * \param subscriber Handle to the callback subscription
+ * \param domain The domain of the callback
+ * \param cbid The ID of the callback
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber, \p domain or \p
+ * cbid is invalid.
+ */
+CUptiResult CUPTIAPI cuptiEnableCallback(uint32_t enable,
+                                         CUpti_SubscriberHandle subscriber,
+                                         CUpti_CallbackDomain domain,
+                                         CUpti_CallbackId cbid);
+
+/**
+ * \brief Enable or disable all callbacks for a specific domain.
+ *
+ * Enable or disable all callbacks for a specific domain.
+ *
+ * \note \b Thread-safety: a subscriber must serialize access to
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
+ * d, *) and cuptiEnableDomain(sub, d) are called concurrently, the
+ * results are undefined.
+ *
+ * \param enable New enable state for all callbacks in the
+ * domain. Zero disables all callbacks, non-zero enables all
+ * callbacks.
+ * \param subscriber Handle to the callback subscription
+ * \param domain The domain of the callback
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber or \p domain is invalid
+ */
+CUptiResult CUPTIAPI cuptiEnableDomain(uint32_t enable,
+                                       CUpti_SubscriberHandle subscriber,
+                                       CUpti_CallbackDomain domain);
+
+/**
+ * \brief Enable or disable all callbacks in all domains.
+ *
+ * Enable or disable all callbacks in all domains.
+ *
+ * \note \b Thread-safety: a subscriber must serialize access to
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
+ * d, *) and cuptiEnableAllDomains(sub) are called concurrently, the
+ * results are undefined.
+ *
+ * \param enable New enable state for all callbacks in all
+ * domains. Zero disables all callbacks, non-zero enables all
+ * callbacks.
+ * \param subscriber Handle to the callback subscription
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is invalid
+ */
+CUptiResult CUPTIAPI cuptiEnableAllDomains(uint32_t enable,
+                                           CUpti_SubscriberHandle subscriber);
+
+/**
+ * \brief Get the name of a callback for a specific domain and callback ID.
+ *
+ * Returns a pointer to the name C string in \p *name.
+ *
+ * \note \b Names are available only for the DRIVER and RUNTIME domains.
+ *
+ * \param domain The domain of the callback
+ * \param cbid The ID of the callback
+ * \param name Returns pointer to the name string on success, NULL otherwise
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p name is NULL, or if
+ * \p domain or \p cbid is invalid.
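+ *
+ * For instance (a minimal sketch; CUPTI_DRIVER_TRACE_CBID_cuMemAlloc is one of
+ * the driver callback IDs from cupti_driver_cbid.h):
+ * \code
+  const char *name = NULL;
+  cuptiGetCallbackName(CUPTI_CB_DOMAIN_DRIVER_API,
+                       CUPTI_DRIVER_TRACE_CBID_cuMemAlloc, &name);
+  // on success, name points to the API function's name string
+ \endcode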
+ */
+CUptiResult CUPTIAPI cuptiGetCallbackName(CUpti_CallbackDomain domain,
+                                          uint32_t cbid,
+                                          const char **name);
+
+/** @} */ /* END CUPTI_CALLBACK_API */
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+  #pragma GCC visibility pop
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif // file guard
+
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h
new file mode 100644
index 0000000000000000000000000000000000000000..36eeddc4e2b7bfd1902ce313d71f173db70beaef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h
@@ -0,0 +1,127 @@
+#pragma once
+
+#include <cuda.h>
+#include <cupti_result.h>
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace NV { namespace Cupti { namespace Checkpoint {
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * \defgroup CUPTI_CHECKPOINT_API CUPTI Checkpoint API
+ * Functions, types, and enums that implement the CUPTI Checkpoint API.
+ * @{
+ */
+
+/**
+ * \brief Specifies optimization options for a checkpoint; options may be OR'd together to specify multiple options.
+ */
+typedef enum
+{
+  CUPTI_CHECKPOINT_OPT_NONE = 0,     //!< Default behavior
+  CUPTI_CHECKPOINT_OPT_TRANSFER = 1, //!< Determine which mem blocks have changed, and only restore those. This optimization is cached, which means cuptiCheckpointRestore must always be called at the same point in the application when this option is enabled, or the result may be incorrect.
+} CUpti_CheckpointOptimizations;
+
+/**
+ * \brief Configuration and handle for a CUPTI Checkpoint
+ *
+ * A CUpti_Checkpoint object should be initialized with the desired options prior to passing into any
+ * CUPTI Checkpoint API function. The first call into a Checkpoint API function will initialize internal
+ * state based on these options. Subsequent changes to these options will not have any effect.
+ *
+ * Checkpoint data is saved in device, host, and filesystem space. There are options to reserve memory
+ * at each level (device, host, filesystem) which are intended to allow a guarantee that a certain amount
+ * of memory will remain free for use after the checkpoint is saved.
+ * Note, however, that falling back to slower levels of memory (host, and then filesystem) to save the checkpoint
+ * will result in performance degradation.
+ * Currently, the filesystem limitation is not implemented. Note that falling back to filesystem storage may
+ * significantly impact the performance for saving and restoring a checkpoint.
+ */
+typedef struct
+{
+  size_t structSize;      //!< [in] Must be set to CUpti_Checkpoint_STRUCT_SIZE
+
+  CUcontext ctx;          //!< [in] Set to context to save from, or will use current context if NULL
+
+  size_t reserveDeviceMB; //!< [in] Restrict checkpoint from using last N MB of device memory (-1 = use no device memory)
+  size_t reserveHostMB;   //!< [in] Restrict checkpoint from using last N MB of host memory (-1 = use no host memory)
+  uint8_t allowOverwrite; //!< [in] Boolean, Allow checkpoint to save over existing checkpoint
+  uint8_t optimizations;  //!< [in] Mask of CUpti_CheckpointOptimizations flags for this checkpoint
+
+  void * pPriv;           //!< [in] Assign to NULL
+} CUpti_Checkpoint;
+
+#define CUpti_Checkpoint_STRUCT_SIZE \
+(offsetof(CUpti_Checkpoint, pPriv) + \
+sizeof(((CUpti_Checkpoint*)(nullptr))->pPriv))
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+  #pragma GCC visibility push(default)
+#endif
+
+/**
+ * \brief Initialize and save a checkpoint of the device state associated with the handle context
+ *
+ * Uses the handle options to configure and save a checkpoint of the device state associated with the specified context.
+ *
+ * \param handle A pointer to a CUpti_Checkpoint object
+ *
+ * \retval CUPTI_SUCCESS if a checkpoint was successfully initialized and saved
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p handle does not appear to refer to a valid CUpti_Checkpoint
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
+ * \retval CUPTI_ERROR_INVALID_DEVICE if device associated with context is not compatible with checkpoint API
+ * \retval CUPTI_ERROR_INVALID_OPERATION if Save is requested over an existing checkpoint, but \p allowOverwrite was not originally specified
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if, as configured, there is not enough backing storage space to save the checkpoint
+ */
+CUptiResult cuptiCheckpointSave(CUpti_Checkpoint * const handle);
+
+/**
+ * \brief Restore a checkpoint to the device associated with its context
+ *
+ * Restores device, pinned, and allocated memory to the state when the checkpoint was saved
+ *
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
+ *
+ * \retval CUPTI_SUCCESS if the checkpoint was successfully restored
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if the checkpoint was not previously initialized
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle appears invalid
+ * \retval CUPTI_ERROR_UNKNOWN if the restore or optimization operation fails
+ */
+CUptiResult cuptiCheckpointRestore(CUpti_Checkpoint * const handle);
+
+/**
+ * \brief Free the backing data for a checkpoint
+ *
+ * Frees all associated device memory, host memory, and filesystem storage used for this context.
+ * After freeing a handle, it may be re-used as if it were new - options may be re-configured and will
+ * take effect on the next call to \p cuptiCheckpointSave.
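+ *
+ * A typical save/replay/free cycle (a minimal sketch; numPasses and
+ * launchWork are hypothetical client-side names):
+ * \code
+  CUpti_Checkpoint cp = { CUpti_Checkpoint_STRUCT_SIZE };
+  cp.ctx = NULL;   // NULL selects the current context
+  cuptiCheckpointSave(&cp);
+  for (int pass = 0; pass < numPasses; ++pass) {
+    launchWork();                 // work that mutates device state
+    cuptiCheckpointRestore(&cp);  // rewind device state for the next pass
+  }
+  cuptiCheckpointFree(&cp);
+ \endcode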
+ * + * \param handle A pointer to a previously saved CUpti_Checkpoint object + * + * \retval CUPTI_SUCCESS if the handle was successfully freed + * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle was already freed or appears invalid + * \retval CUPTI_ERROR_INVALID_CONTEXT if the context is no longer valid + */ +CUptiResult cuptiCheckpointFree(CUpti_Checkpoint * const handle); + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +// Exit namespace NV::Cupti::Checkpoint +}}} diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h new file mode 100644 index 0000000000000000000000000000000000000000..259e46f79edebc9902bbe8aa197af8f033033052 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h @@ -0,0 +1,725 @@ + +// ************************************************************************* +// Definitions of indices for API functions, unique across entire API +// ************************************************************************* + +// This file is generated. Any changes you make will be lost during the next clean build. +// CUDA public interface, for type definitions and cu* function prototypes + +typedef enum CUpti_driver_api_trace_cbid_enum { + CUPTI_DRIVER_TRACE_CBID_INVALID = 0, + CUPTI_DRIVER_TRACE_CBID_cuInit = 1, + CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = 2, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = 3, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = 4, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = 5, + CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = 6, + CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = 7, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = 8, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = 9, + CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = 10, + CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = 11, + CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = 12, + CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = 13, + CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = 14, + CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = 15, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = 16, + CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = 17, + CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = 18, + CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = 19, + CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = 20, + CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = 21, + CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = 22, + CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = 23, + CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = 24, + CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = 25, + CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = 26, + CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = 27, + CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = 28, + CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = 29, + CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = 30, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = 31, + CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = 32, + CUPTI_DRIVER_TRACE_CBID_cuMemFree = 33, + CUPTI_DRIVER_TRACE_CBID_cu64MemFree = 34, + CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = 35, + CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = 36, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = 37, + CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = 38, + CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = 39, + CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = 40, + CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = 41, + CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = 42, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = 43, + 
CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = 44, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = 45, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = 46, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = 47, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = 48, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = 49, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = 50, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = 51, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = 52, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = 53, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = 54, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = 55, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = 56, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = 57, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = 58, + CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = 59, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = 60, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = 61, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = 62, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = 63, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = 64, + CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = 65, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = 66, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = 67, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = 68, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = 69, + CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = 70, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = 71, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = 72, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = 73, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = 74, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = 75, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = 76, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = 77, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = 78, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = 79, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = 80, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = 81, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = 82, + CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = 83, + CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = 84, + CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = 85, + CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = 86, + CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = 87, + CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = 88, + CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = 89, + CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = 90, + CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = 91, + CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = 92, + CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = 93, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = 94, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = 95, + CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = 96, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = 97, + CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = 98, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = 99, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = 100, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = 101, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = 102, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = 103, + CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = 104, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = 105, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = 106, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = 107, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = 108, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = 109, + CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = 110, + CUPTI_DRIVER_TRACE_CBID_cuParamSeti = 111, + CUPTI_DRIVER_TRACE_CBID_cuParamSetf = 112, + CUPTI_DRIVER_TRACE_CBID_cuParamSetv = 113, + CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = 114, + CUPTI_DRIVER_TRACE_CBID_cuLaunch = 115, + CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = 116, + 
CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = 117, + CUPTI_DRIVER_TRACE_CBID_cuEventCreate = 118, + CUPTI_DRIVER_TRACE_CBID_cuEventRecord = 119, + CUPTI_DRIVER_TRACE_CBID_cuEventQuery = 120, + CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = 121, + CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = 122, + CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = 123, + CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = 124, + CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = 125, + CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = 126, + CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = 127, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = 128, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = 129, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = 130, + CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = 131, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = 132, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = 133, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = 134, + CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = 135, + CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = 136, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = 137, + CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = 138, + CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = 139, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = 140, + CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = 141, + CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = 142, + CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = 143, + CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = 144, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = 145, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = 146, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = 147, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = 148, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = 149, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = 150, + CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = 151, + CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = 152, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = 153, + CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = 154, + CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = 155, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = 156, + CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = 157, + CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = 158, + CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = 159, + CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = 160, + CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = 161, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = 162, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = 163, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = 164, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = 165, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = 166, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = 167, + CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = 168, + CUPTI_DRIVER_TRACE_CBID_cuD3D9End = 169, + CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = 170, + CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = 171, + CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = 172, + CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = 173, + CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = 174, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = 175, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = 176, + CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = 177, + CUPTI_DRIVER_TRACE_CBID_cuGLInit = 178, + CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = 179, + 
CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = 180, + CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = 181, + CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = 182, + CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = 183, + CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = 184, + CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = 185, + CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = 186, + CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = 187, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = 188, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = 189, + CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = 190, + CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = 191, + CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = 192, + CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = 193, + CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = 194, + CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = 195, + CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = 196, + CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = 197, + CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = 198, + CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = 199, + CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = 200, + CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = 201, + CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = 202, + CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = 203, + CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = 204, + CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = 205, + CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = 206, + CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = 207, + CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = 208, + CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = 209, + CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = 210, + CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = 211, + CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = 212, + CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = 213, + CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = 214, + CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = 215, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = 216, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = 217, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = 218, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = 219, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = 220, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = 221, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = 222, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = 223, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = 224, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = 225, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = 226, + CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = 227, + CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = 228, + CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = 229, + CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = 230, + CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = 231, + CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = 232, + CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = 233, + CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = 234, + CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = 235, + CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = 236, + CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = 237, + CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = 238, + CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = 239, + CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = 240, + CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = 241, + CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = 242, + CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = 243, + 
CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = 244, + CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = 245, + CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = 246, + CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = 247, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = 248, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = 249, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = 250, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = 251, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = 252, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = 253, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = 254, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = 255, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = 256, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = 257, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = 258, + CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = 259, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = 260, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = 261, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = 262, + CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = 263, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = 264, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = 265, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = 266, + CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = 267, + CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = 268, + CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = 269, + CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = 270, + CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = 271, + CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = 272, + CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = 273, + CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = 274, + CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = 275, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = 276, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = 277, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = 278, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = 279, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = 280, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = 281, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = 282, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = 283, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = 284, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = 285, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = 286, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = 287, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = 288, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = 289, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = 290, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = 291, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = 292, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = 293, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = 294, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = 295, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = 296, + CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = 297, + CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = 298, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = 299, + CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = 300, + CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = 301, + CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = 302, + CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = 303, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = 304, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy = 305, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = 306, + CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = 307, + CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = 308, + CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = 309, + 
CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = 310, + CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = 311, + CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = 312, + CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = 313, + CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = 314, + CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = 315, + CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = 316, + CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = 317, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = 318, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = 319, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = 320, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = 321, + CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = 322, + CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = 323, + CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = 324, + CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = 325, + CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = 326, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = 327, + CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = 328, + CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = 329, + CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = 330, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = 331, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = 332, + CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = 333, + CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = 334, + CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = 335, + CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = 336, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = 337, + CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = 338, + CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = 339, + CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = 340, + CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = 341, + CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = 342, + CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = 343, + CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = 344, + CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = 345, + CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = 346, + CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = 347, + CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = 348, + CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = 349, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = 350, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = 351, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = 352, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = 353, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = 354, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = 355, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = 356, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = 357, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = 358, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = 359, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = 360, + CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = 361, + CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = 362, + CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = 363, + CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = 364, + CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = 365, + CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = 366, + CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = 367, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = 368, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = 369, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = 370, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = 371, + CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = 372, + CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = 373, + 
CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = 374, + CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = 375, + CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = 376, + CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = 377, + CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = 378, + CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = 379, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = 380, + CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = 381, + CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = 382, + CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = 383, + CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = 384, + CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = 385, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = 386, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = 387, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = 388, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = 389, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = 390, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = 391, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = 392, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = 393, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = 394, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = 395, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = 396, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = 397, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = 398, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = 399, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = 400, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = 401, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = 402, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = 403, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = 404, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = 405, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = 406, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = 407, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = 408, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = 409, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = 410, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = 411, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = 412, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = 413, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = 414, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = 415, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = 416, + CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = 417, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = 418, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = 419, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = 420, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = 421, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = 422, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = 423, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = 424, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = 425, + CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = 426, + CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = 427, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = 428, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = 429, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = 430, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async_ptsz = 431, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async_ptsz = 432, + CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async_ptsz = 433, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority_ptsz = 434, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags_ptsz = 435, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent_ptsz = 436, + 
CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback_ptsz = 437, + CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync_ptsz = 438, + CUPTI_DRIVER_TRACE_CBID_cuStreamQuery_ptsz = 439, + CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize_ptsz = 440, + CUPTI_DRIVER_TRACE_CBID_cuEventRecord_ptsz = 441, + CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel_ptsz = 442, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources_ptsz = 443, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources_ptsz = 444, + CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2_ptsz = 445, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerConnect = 446, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerDisconnect = 447, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerPresentFrame = 448, + CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedEglFrame = 449, + CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttributes = 450, + CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = 451, + CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSizeWithFlags = 452, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerReturnFrame = 453, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetP2PAttribute = 454, + CUPTI_DRIVER_TRACE_CBID_cuTexRefSetBorderColor = 455, + CUPTI_DRIVER_TRACE_CBID_cuTexRefGetBorderColor = 456, + CUPTI_DRIVER_TRACE_CBID_cuMemAdvise = 457, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32 = 458, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_ptsz = 459, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32 = 460, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_ptsz = 461, + CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp = 462, + CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_ptsz = 463, + CUPTI_DRIVER_TRACE_CBID_cuNVNbufferGetPointer = 464, + CUPTI_DRIVER_TRACE_CBID_cuNVNtextureGetArray = 465, + CUPTI_DRIVER_TRACE_CBID_cuNNSetAllocator = 466, + CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync = 467, + CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_ptsz = 468, + CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromNVNSync = 469, + CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnectWithFlags = 470, + CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttribute = 471, + CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttributes = 472, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64 = 473, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_ptsz = 474, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64 = 475, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_ptsz = 476, + CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel = 477, + CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel_ptsz = 478, + CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromEGLSync = 479, + CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernelMultiDevice = 480, + CUPTI_DRIVER_TRACE_CBID_cuFuncSetAttribute = 481, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid = 482, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx = 483, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_ptsz = 484, + CUPTI_DRIVER_TRACE_CBID_cuImportExternalMemory = 485, + CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedBuffer = 486, + CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedMipmappedArray = 487, + CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalMemory = 488, + CUPTI_DRIVER_TRACE_CBID_cuImportExternalSemaphore = 489, + CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync = 490, + CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync_ptsz = 491, + CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync = 492, + CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync_ptsz = 493, + CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalSemaphore = 494, + CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture = 495, + CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_ptsz = 496, + 
CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture = 497, + CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture_ptsz = 498, + CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing = 499, + CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing_ptsz = 500, + CUPTI_DRIVER_TRACE_CBID_cuGraphCreate = 501, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode = 502, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams = 503, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemcpyNode = 504, + CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeGetParams = 505, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemsetNode = 506, + CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeGetParams = 507, + CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeSetParams = 508, + CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetType = 509, + CUPTI_DRIVER_TRACE_CBID_cuGraphGetRootNodes = 510, + CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies = 511, + CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes = 512, + CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate = 513, + CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch = 514, + CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch_ptsz = 515, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecDestroy = 516, + CUPTI_DRIVER_TRACE_CBID_cuGraphDestroy = 517, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies = 518, + CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies = 519, + CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeSetParams = 520, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams = 521, + CUPTI_DRIVER_TRACE_CBID_cuGraphDestroyNode = 522, + CUPTI_DRIVER_TRACE_CBID_cuGraphClone = 523, + CUPTI_DRIVER_TRACE_CBID_cuGraphNodeFindInClone = 524, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddChildGraphNode = 525, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddEmptyNode = 526, + CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc = 527, + CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc_ptsz = 528, + CUPTI_DRIVER_TRACE_CBID_cuGraphChildGraphNodeGetGraph = 529, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddHostNode = 530, + CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeGetParams = 531, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetLuid = 532, + CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeSetParams = 533, + CUPTI_DRIVER_TRACE_CBID_cuGraphGetNodes = 534, + CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges = 535, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo = 536, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_ptsz = 537, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams = 538, + CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2 = 539, + CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2_ptsz = 540, + CUPTI_DRIVER_TRACE_CBID_cuThreadExchangeStreamCaptureMode = 541, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetNvSciSyncAttributes = 542, + CUPTI_DRIVER_TRACE_CBID_cuOccupancyAvailableDynamicSMemPerBlock = 543, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease_v2 = 544, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset_v2 = 545, + CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags_v2 = 546, + CUPTI_DRIVER_TRACE_CBID_cuMemAddressReserve = 547, + CUPTI_DRIVER_TRACE_CBID_cuMemAddressFree = 548, + CUPTI_DRIVER_TRACE_CBID_cuMemCreate = 549, + CUPTI_DRIVER_TRACE_CBID_cuMemRelease = 550, + CUPTI_DRIVER_TRACE_CBID_cuMemMap = 551, + CUPTI_DRIVER_TRACE_CBID_cuMemUnmap = 552, + CUPTI_DRIVER_TRACE_CBID_cuMemSetAccess = 553, + CUPTI_DRIVER_TRACE_CBID_cuMemExportToShareableHandle = 554, + CUPTI_DRIVER_TRACE_CBID_cuMemImportFromShareableHandle = 555, + CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationGranularity = 556, + CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationPropertiesFromHandle = 557, + CUPTI_DRIVER_TRACE_CBID_cuMemGetAccess = 558, + CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags = 559, + CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags_ptsz = 
560, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate = 561, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemcpyNodeSetParams = 562, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemsetNodeSetParams = 563, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecHostNodeSetParams = 564, + CUPTI_DRIVER_TRACE_CBID_cuMemRetainAllocationHandle = 565, + CUPTI_DRIVER_TRACE_CBID_cuFuncGetModule = 566, + CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle_v2 = 567, + CUPTI_DRIVER_TRACE_CBID_cuCtxResetPersistingL2Cache = 568, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeCopyAttributes = 569, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetAttribute = 570, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetAttribute = 571, + CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes = 572, + CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes_ptsz = 573, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute = 574, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute_ptsz = 575, + CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute = 576, + CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute_ptsz = 577, + CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate_v2 = 578, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetTexture1DLinearMaxWidth = 579, + CUPTI_DRIVER_TRACE_CBID_cuGraphUpload = 580, + CUPTI_DRIVER_TRACE_CBID_cuGraphUpload_ptsz = 581, + CUPTI_DRIVER_TRACE_CBID_cuArrayGetSparseProperties = 582, + CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetSparseProperties = 583, + CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync = 584, + CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync_ptsz = 585, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecChildGraphNodeSetParams = 586, + CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags = 587, + CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags_ptsz = 588, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventRecordNode = 589, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventWaitNode = 590, + CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeGetEvent = 591, + CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeGetEvent = 592, + CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeSetEvent = 593, + CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeSetEvent = 594, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventRecordNodeSetEvent = 595, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventWaitNodeSetEvent = 596, + CUPTI_DRIVER_TRACE_CBID_cuArrayGetPlane = 597, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync = 598, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync_ptsz = 599, + CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync = 600, + CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync_ptsz = 601, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolTrimTo = 602, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAttribute = 603, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAttribute = 604, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAccess = 605, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDefaultMemPool = 606, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolCreate = 607, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolDestroy = 608, + CUPTI_DRIVER_TRACE_CBID_cuDeviceSetMemPool = 609, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetMemPool = 610, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync = 611, + CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync_ptsz = 612, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportToShareableHandle = 613, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportFromShareableHandle = 614, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportPointer = 615, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportPointer = 616, + CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAccess = 617, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresSignalNode = 618, + CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeGetParams = 619, + CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeSetParams = 620, + 
CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresWaitNode = 621, + CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeGetParams = 622, + CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeSetParams = 623, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresSignalNodeSetParams = 624, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresWaitNodeSetParams = 625, + CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress = 626, + CUPTI_DRIVER_TRACE_CBID_cuFlushGPUDirectRDMAWrites = 627, + CUPTI_DRIVER_TRACE_CBID_cuGraphDebugDotPrint = 628, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2 = 629, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2_ptsz = 630, + CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies = 631, + CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_ptsz = 632, + CUPTI_DRIVER_TRACE_CBID_cuUserObjectCreate = 633, + CUPTI_DRIVER_TRACE_CBID_cuUserObjectRetain = 634, + CUPTI_DRIVER_TRACE_CBID_cuUserObjectRelease = 635, + CUPTI_DRIVER_TRACE_CBID_cuGraphRetainUserObject = 636, + CUPTI_DRIVER_TRACE_CBID_cuGraphReleaseUserObject = 637, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemAllocNode = 638, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemFreeNode = 639, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGraphMemTrim = 640, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetGraphMemAttribute = 641, + CUPTI_DRIVER_TRACE_CBID_cuDeviceSetGraphMemAttribute = 642, + CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithFlags = 643, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetExecAffinitySupport = 644, + CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v3 = 645, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetExecAffinity = 646, + CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid_v2 = 647, + CUPTI_DRIVER_TRACE_CBID_cuGraphMemAllocNodeGetParams = 648, + CUPTI_DRIVER_TRACE_CBID_cuGraphMemFreeNodeGetParams = 649, + CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetEnabled = 650, + CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetEnabled = 651, + CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx = 652, + CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx_ptsz = 653, + CUPTI_DRIVER_TRACE_CBID_cuArrayGetMemoryRequirements = 654, + CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetMemoryRequirements = 655, + CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams = 656, + CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams_ptsz = 657, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecGetFlags = 658, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2 = 659, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2_ptsz = 660, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2 = 661, + CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2_ptsz = 662, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2 = 663, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2_ptsz = 664, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2 = 665, + CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2_ptsz = 666, + CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = 667, + CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = 668, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = 669, + CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = 670, + CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = 671, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = 672, + CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = 673, + CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = 674, + CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = 675, + CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = 676, + CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = 677, + CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = 678, + CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = 679, + 
CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = 680, + CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = 681, + CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = 682, + CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = 683, + CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = 684, + CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = 685, + CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = 686, + CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = 687, + CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = 688, + CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = 689, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = 690, + CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = 691, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = 692, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = 693, + CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = 694, + CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = 695, + CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = 696, + CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = 697, + CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = 698, + CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = 699, + CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = 700, + CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = 701, + CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = 702, + CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = 703, + CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = 704, + CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = 705, + CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = 706, + CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = 707, + CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = 708, + CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = 709, + CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = 710, + CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = 711, + CUPTI_DRIVER_TRACE_CBID_SIZE = 712, + CUPTI_DRIVER_TRACE_CBID_FORCE_INT = 0x7fffffff +} CUpti_driver_api_trace_cbid; + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h new file mode 100644 index 0000000000000000000000000000000000000000..d76394e8bc4c9dbbff8422eaa50651340639a546 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h @@ -0,0 +1,1371 @@ +/* + * Copyright 2010-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(_CUPTI_EVENTS_H_) +#define _CUPTI_EVENTS_H_ + +#include +#include +#include +#include + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \defgroup CUPTI_EVENT_API CUPTI Event API + * Functions, types, and enums that implement the CUPTI Event API. + * + * \note The CUPTI event APIs from the header cupti_events.h are not supported on devices + * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures). + * These APIs will be deprecated in a future CUDA release. They are replaced by the + * Profiling API in the header cupti_profiler_target.h and the Perfworks metrics API + * in the headers nvperf_host.h and nvperf_target.h, which are supported on + * devices with compute capability 7.0 and higher (i.e. Volta and later GPU + * architectures). + * + * @{ + */ + +/** + * \brief ID for an event. + * + * An event represents a countable activity, action, or occurrence on + * the device. + */ +typedef uint32_t CUpti_EventID; + +/** + * \brief ID for an event domain. + * + * ID for an event domain. An event domain represents a group of + * related events. A device may have multiple instances of a domain, + * indicating that the device can simultaneously record multiple + * instances of each event within that domain. + */ +typedef uint32_t CUpti_EventDomainID; + +/** + * \brief A group of events. + * + * An event group is a collection of events that are managed + * together. All events in an event group must belong to the same + * domain. + */ +typedef void *CUpti_EventGroup; + +/** + * \brief Device class. + * + * Enumeration of device classes for device attribute + * CUPTI_DEVICE_ATTR_DEVICE_CLASS. + */ +typedef enum { + CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA = 0, + CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO = 1, + CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE = 2, + CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA = 3, +} CUpti_DeviceAttributeDeviceClass; + +/** + * \brief Device attributes. + * + * CUPTI device attributes. These attributes can be read using \ref + * cuptiDeviceGetAttribute.
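+ * + * As a minimal illustrative sketch, assuming \p dev is a valid CUdevice (the variable names here are examples only): + * \code + * CUpti_DeviceAttributeDeviceClass devClass; + * size_t valueSize = sizeof(devClass); + * // Read the device class attribute; the value is a CUpti_DeviceAttributeDeviceClass. + * cuptiDeviceGetAttribute(dev, CUPTI_DEVICE_ATTR_DEVICE_CLASS, &valueSize, &devClass); + * \endcode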
+ */ +typedef enum { + /** + * Number of event IDs for a device. Value is a uint32_t. + */ + CUPTI_DEVICE_ATTR_MAX_EVENT_ID = 1, + /** + * Number of event domain IDs for a device. Value is a uint32_t. + */ + CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID = 2, + /** + * Get global memory bandwidth in Kbytes/sec. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH = 3, + /** + * Get theoretical maximum number of instructions per cycle. Value + * is a uint32_t. + */ + CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE = 4, + /** + * Get theoretical maximum number of single precision instructions + * that can be executed per second. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION = 5, + /** + * Get number of frame buffers for device. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS = 6, + /** + * Get PCIE link rate in Mega bits/sec for device. Return 0 if bus-type + * is non-PCIE. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_PCIE_LINK_RATE = 7, + /** + * Get PCIE link width for device. Return 0 if bus-type + * is non-PCIE. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH = 8, + /** + * Get PCIE generation for device. Return 0 if bus-type + * is non-PCIE. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_PCIE_GEN = 9, + /** + * Get the class for the device. Value is a + * CUpti_DeviceAttributeDeviceClass. + */ + CUPTI_DEVICE_ATTR_DEVICE_CLASS = 10, + /** + * Get the peak single precision flop per cycle. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE = 11, + /** + * Get the peak double precision flop per cycle. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE = 12, + /** + * Get number of L2 units. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_MAX_L2_UNITS = 13, + /** + * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_SHARED + * preference. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED = 14, + /** + * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_L1 + * preference. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1 = 15, + /** + * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_EQUAL + * preference. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL = 16, + /** + * Get the peak half precision flop per cycle. Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE = 17, + /** + * Check if Nvlink is connected to device. Returns 1, if at least one + * Nvlink is connected to the device, returns 0 otherwise. + * Value is a uint32_t. + */ + CUPTI_DEVICE_ATTR_NVLINK_PRESENT = 18, + /** + * Check if Nvlink is present between GPU and CPU. Returns Bandwidth, + * in Bytes/sec, if Nvlink is present, returns 0 otherwise. + * Value is a uint64_t. + */ + CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW = 19, + /** + * Check if NVSwitch is present in the underlying topology. + * Returns 1, if present, returns 0 otherwise. + * Value is a uint32_t. + */ + CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT = 20, + CUPTI_DEVICE_ATTR_FORCE_INT = 0x7fffffff, +} CUpti_DeviceAttribute; + +/** + * \brief Event domain attributes. + * + * Event domain attributes. Except where noted, all the attributes can + * be read using either \ref cuptiDeviceGetEventDomainAttribute or + * \ref cuptiEventDomainGetAttribute. + */ +typedef enum { + /** + * Event domain name. Value is a null terminated const c-string. 
+ */ + CUPTI_EVENT_DOMAIN_ATTR_NAME = 0, + /** + * Number of instances of the domain for which event counts will be + * collected. The domain may have additional instances that cannot + * be profiled (see CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT). + * Can be read only with \ref + * cuptiDeviceGetEventDomainAttribute. Value is a uint32_t. + */ + CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT = 1, + /** + * Total number of instances of the domain, including instances that + * cannot be profiled. Use CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT + * to get the number of instances that can be profiled. Can be read + * only with \ref cuptiDeviceGetEventDomainAttribute. Value is a + * uint32_t. + */ + CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT = 3, + /** + * Collection method used for events contained in the event domain. + * Value is a \ref CUpti_EventCollectionMethod. + */ + CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD = 4, + + CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT = 0x7fffffff, +} CUpti_EventDomainAttribute; + +/** + * \brief The collection method used for an event. + * + * The collection method indicates how an event is collected. + */ +typedef enum { + /** + * Event is collected using a hardware global performance monitor. + */ + CUPTI_EVENT_COLLECTION_METHOD_PM = 0, + /** + * Event is collected using a hardware SM performance monitor. + */ + CUPTI_EVENT_COLLECTION_METHOD_SM = 1, + /** + * Event is collected using software instrumentation. + */ + CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED = 2, + /** + * Event is collected using NvLink throughput counter method. + */ + CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC = 3, + CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT = 0x7fffffff +} CUpti_EventCollectionMethod; + +/** + * \brief Event group attributes. + * + * Event group attributes. These attributes can be read using \ref + * cuptiEventGroupGetAttribute. Attributes marked [rw] can also be + * written using \ref cuptiEventGroupSetAttribute. + */ +typedef enum { + /** + * The domain to which the event group is bound. This attribute is + * set when the first event is added to the group. Value is a + * CUpti_EventDomainID. + */ + CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID = 0, + /** + * [rw] Profile all the instances of the domain for this + * eventgroup. This feature can be used to get load balancing + * across all instances of a domain. Value is an integer. + */ + CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES = 1, + /** + * [rw] Reserved for user data. + */ + CUPTI_EVENT_GROUP_ATTR_USER_DATA = 2, + /** + * Number of events in the group. Value is a uint32_t. + */ + CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS = 3, + /** + * Enumerates events in the group. Value is a pointer to buffer of + * size sizeof(CUpti_EventID) * num_of_events in the eventgroup. + * num_of_events can be queried using + * CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS. + */ + CUPTI_EVENT_GROUP_ATTR_EVENTS = 4, + /** + * Number of instances of the domain bound to this event group that + * will be counted. Value is a uint32_t. + */ + CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT = 5, + /** + * Event group scope can be set to CUPTI_EVENT_PROFILING_SCOPE_DEVICE or + * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT for an eventGroup, before + * adding any event. + * Sets the scope of eventgroup as CUPTI_EVENT_PROFILING_SCOPE_DEVICE or + * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT when the scope of the events + * that will be added is CUPTI_EVENT_PROFILING_SCOPE_BOTH. 
+ * If the profiling scope of the event is either + * CUPTI_EVENT_PROFILING_SCOPE_DEVICE or CUPTI_EVENT_PROFILING_SCOPE_CONTEXT + * then setting this attribute will not affect the default scope. + * It is not allowed to add events of different scope to the same eventgroup. + * Value is a uint32_t. + */ + CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE = 6, + CUPTI_EVENT_GROUP_ATTR_FORCE_INT = 0x7fffffff, +} CUpti_EventGroupAttribute; + +/** +* \brief Profiling scope for event. +* +* The profiling scope of an event indicates whether the event can be +* collected at context scope, at device scope, or at both. +*/ +typedef enum { + /** + * Event is collected at context scope. + */ + CUPTI_EVENT_PROFILING_SCOPE_CONTEXT = 0, + /** + * Event is collected at device scope. + */ + CUPTI_EVENT_PROFILING_SCOPE_DEVICE = 1, + /** + * Event can be collected at device or context scope. + * The scope can be set using the \ref cuptiEventGroupSetAttribute API. + */ + CUPTI_EVENT_PROFILING_SCOPE_BOTH = 2, + CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT = 0x7fffffff +} CUpti_EventProfilingScope; + +/** + * \brief Event attributes. + * + * Event attributes. These attributes can be read using \ref + * cuptiEventGetAttribute. + */ +typedef enum { + /** + * Event name. Value is a null terminated const c-string. + */ + CUPTI_EVENT_ATTR_NAME = 0, + /** + * Short description of event. Value is a null terminated const + * c-string. + */ + CUPTI_EVENT_ATTR_SHORT_DESCRIPTION = 1, + /** + * Long description of event. Value is a null terminated const + * c-string. + */ + CUPTI_EVENT_ATTR_LONG_DESCRIPTION = 2, + /** + * Category of event. Value is CUpti_EventCategory. + */ + CUPTI_EVENT_ATTR_CATEGORY = 3, + /** + * Profiling scope of the events. It can be device, context, or both. + * Value is a \ref CUpti_EventProfilingScope. + */ + CUPTI_EVENT_ATTR_PROFILING_SCOPE = 5, + + CUPTI_EVENT_ATTR_FORCE_INT = 0x7fffffff, +} CUpti_EventAttribute; + +/** + * \brief Event collection modes. + * + * The event collection mode determines the period over which the + * events within the enabled event groups will be collected. + */ +typedef enum { + /** + * Events are collected for the entire duration between the + * cuptiEventGroupEnable and cuptiEventGroupDisable calls. + * Event values are reset when the events are read. + * For CUDA toolkit v6.0 and older this was the default mode. + */ + CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS = 0, + /** + * Events are collected only for the durations of kernel executions + * that occur between the cuptiEventGroupEnable and + * cuptiEventGroupDisable calls. Event collection begins when a + * kernel execution begins, and stops when kernel execution + * completes. Event values are reset to zero when each kernel + * execution begins. If multiple kernel executions occur between the + * cuptiEventGroupEnable and cuptiEventGroupDisable calls then the + * event values must be read after each kernel launch if those + * events need to be associated with the specific kernel launch. + * Note that collection in this mode may significantly change the + * overall performance characteristics of the application because + * kernel executions that occur between the cuptiEventGroupEnable and + * cuptiEventGroupDisable calls are serialized on the GPU. + * This is the default mode from CUDA toolkit v6.5. + */ + CUPTI_EVENT_COLLECTION_MODE_KERNEL = 1, + CUPTI_EVENT_COLLECTION_MODE_FORCE_INT = 0x7fffffff +} CUpti_EventCollectionMode; + +/** + * \brief An event category.
+ * + * Each event is assigned to a category that represents the general + * type of the event. An event's category is accessed using \ref + * cuptiEventGetAttribute and the CUPTI_EVENT_ATTR_CATEGORY attribute. + */ +typedef enum { + /** + * An instruction related event. + */ + CUPTI_EVENT_CATEGORY_INSTRUCTION = 0, + /** + * A memory related event. + */ + CUPTI_EVENT_CATEGORY_MEMORY = 1, + /** + * A cache related event. + */ + CUPTI_EVENT_CATEGORY_CACHE = 2, + /** + * A profile-trigger event. + */ + CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3, + /** + * A system event. + */ + CUPTI_EVENT_CATEGORY_SYSTEM = 4, + CUPTI_EVENT_CATEGORY_FORCE_INT = 0x7fffffff +} CUpti_EventCategory; + +/** + * \brief The overflow value for a CUPTI event. + * + * The CUPTI event value that indicates an overflow. + */ +#define CUPTI_EVENT_OVERFLOW ((uint64_t)0xFFFFFFFFFFFFFFFFULL) + +/** + * \brief The value that indicates the event value is invalid + */ +#define CUPTI_EVENT_INVALID ((uint64_t)0xFFFFFFFFFFFFFFFEULL) + +/** + * \brief Flags for cuptiEventGroupReadEvent and + * cuptiEventGroupReadAllEvents. + * + * Flags for \ref cuptiEventGroupReadEvent and \ref + * cuptiEventGroupReadAllEvents. + */ +typedef enum { + /** + * No flags. + */ + CUPTI_EVENT_READ_FLAG_NONE = 0, + CUPTI_EVENT_READ_FLAG_FORCE_INT = 0x7fffffff, +} CUpti_ReadEventFlags; + + +/** + * \brief A set of event groups. + * + * A set of event groups. When returned by \ref + * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets + * a set indicates event groups that can be enabled at the same + * time (i.e. all the events in the set can be collected + * simultaneously). + */ +typedef struct { + /** + * The number of event groups in the set. + */ + uint32_t numEventGroups; + /** + * An array of \p numEventGroups event groups. + */ + CUpti_EventGroup *eventGroups; +} CUpti_EventGroupSet; + +/** + * \brief A set of event group sets. + * + * A set of event group sets. When returned by \ref + * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets + * a CUpti_EventGroupSets indicates the number of passes required to + * collect all the events, and the event groups that should be + * collected during each pass. + */ +typedef struct { + /** + * Number of event group sets. + */ + uint32_t numSets; + /** + * An array of \p numSets event group sets. + */ + CUpti_EventGroupSet *sets; +} CUpti_EventGroupSets; + +/** + * \brief Set the event collection mode. + * + * Set the event collection mode for a \p context. The \p mode + * controls the event collection behavior of all events in event + * groups created in the \p context. This API is invalid in kernel + * replay mode. + * \note \b Thread-safety: this function is thread safe. + * + * \param context The context + * \param mode The event collection mode + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_CONTEXT + * \retval CUPTI_ERROR_INVALID_OPERATION if called when replay mode is enabled + * \retval CUPTI_ERROR_NOT_SUPPORTED if mode is not supported on the device + */ + +CUptiResult CUPTIAPI cuptiSetEventCollectionMode(CUcontext context, + CUpti_EventCollectionMode mode); + +/** + * \brief Read a device attribute. + * + * Read a device attribute and return it in \p *value. + * \note \b Thread-safety: this function is thread safe.
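+ * + * A minimal sketch of a call, assuming a valid CUdevice \p dev (the names below are illustrative): + * \code + * uint32_t maxEventId = 0; + * size_t valueSize = sizeof(maxEventId); + * CUptiResult status = cuptiDeviceGetAttribute(dev, CUPTI_DEVICE_ATTR_MAX_EVENT_ID, &valueSize, &maxEventId); + * if (status != CUPTI_SUCCESS) { + * // handle the error + * } + * \endcode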
+ * + * \param device The CUDA device + * \param attrib The attribute to read + * \param valueSize Size of buffer pointed by the value, and + * returns the number of bytes written to \p value + * \param value Returns the value of the attribute + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not a device attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string + * attribute values, indicates that the \p value buffer is too small + * to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiDeviceGetAttribute(CUdevice device, + CUpti_DeviceAttribute attrib, + size_t *valueSize, + void *value); + +/** + * \brief Read a device timestamp. + * + * Returns the device timestamp in \p *timestamp. The timestamp is + * reported in nanoseconds and indicates the time since the device was + * last reset. + * \note \b Thread-safety: this function is thread safe. + * + * \param context A context on the device from which to get the timestamp + * \param timestamp Returns the device timestamp + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_CONTEXT + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p timestamp is NULL + + * **DEPRECATED** This API is deprecated as of CUDA 11.3 + */ +CUptiResult CUPTIAPI cuptiDeviceGetTimestamp(CUcontext context, + uint64_t *timestamp); + +/** + * \brief Get the number of domains for a device. + * + * Returns the number of domains in \p numDomains for a device. + * \note \b Thread-safety: this function is thread safe. + * + * \param device The CUDA device + * \param numDomains Returns the number of domains + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL + */ +CUptiResult CUPTIAPI cuptiDeviceGetNumEventDomains(CUdevice device, + uint32_t *numDomains); + +/** + * \brief Get the event domains for a device. + * + * Returns the event domain IDs in \p domainArray for a device. The + * size of the \p domainArray buffer is given by \p + * *arraySizeBytes. The size of the \p domainArray buffer must be at + * least \p numdomains * sizeof(CUpti_EventDomainID) or else not all + * domains will be returned. The value returned in \p + * *arraySizeBytes contains the number of bytes returned in \p + * domainArray. + * \note \b Thread-safety: this function is thread safe. + * + * \param device The CUDA device + * \param arraySizeBytes The size of \p domainArray in bytes, and + * returns the number of bytes written to \p domainArray + * \param domainArray Returns the IDs of the event domains for the device + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or + * \p domainArray are NULL + */ +CUptiResult CUPTIAPI cuptiDeviceEnumEventDomains(CUdevice device, + size_t *arraySizeBytes, + CUpti_EventDomainID *domainArray); + +/** + * \brief Read an event domain attribute. + * + * Returns an event domain attribute in \p *value. The size of the \p + * value buffer is given by \p *valueSize. The value returned in \p + * *valueSize contains the number of bytes returned in \p value.
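+ * + * For instance, a minimal sketch that reads the profiled instance count of a domain, assuming valid \p dev and \p domainId handles (illustrative names): + * \code + * uint32_t instanceCount = 0; + * size_t valueSize = sizeof(instanceCount); + * cuptiDeviceGetEventDomainAttribute(dev, domainId, CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT, &valueSize, &instanceCount); + * \endcode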
+ * + * If the attribute value is a c-string that is longer than \p + * *valueSize, then only the first \p *valueSize characters will be + * returned and there will be no terminating null byte. + * \note \b Thread-safety: this function is thread safe. + * + * \param device The CUDA device + * \param eventDomain ID of the event domain + * \param attrib The event domain attribute to read + * \param valueSize The size of the \p value buffer in bytes, and + * returns the number of bytes written to \p value + * \param value Returns the attribute's value + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not an event domain attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string + * attribute values, indicates that the \p value buffer is too small + * to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiDeviceGetEventDomainAttribute(CUdevice device, + CUpti_EventDomainID eventDomain, + CUpti_EventDomainAttribute attrib, + size_t *valueSize, + void *value); + +/** + * \brief Get the number of event domains available on any device. + * + * Returns the total number of event domains available on any + * CUDA-capable device. + * \note \b Thread-safety: this function is thread safe. + * + * \param numDomains Returns the number of domains + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL + */ +CUptiResult CUPTIAPI cuptiGetNumEventDomains(uint32_t *numDomains); + +/** + * \brief Get the event domains available on any device. + * + * Returns all the event domains available on any CUDA-capable device. + * Event domain IDs are returned in \p domainArray. The size of the \p + * domainArray buffer is given by \p *arraySizeBytes. The size of the + * \p domainArray buffer must be at least \p numDomains * + * sizeof(CUpti_EventDomainID) or else not all domains will be + * returned. The value returned in \p *arraySizeBytes contains the + * number of bytes returned in \p domainArray. + * \note \b Thread-safety: this function is thread safe. + * + * \param arraySizeBytes The size of \p domainArray in bytes, and + * returns the number of bytes written to \p domainArray + * \param domainArray Returns all the event domains + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or + * \p domainArray are NULL + */ +CUptiResult CUPTIAPI cuptiEnumEventDomains(size_t *arraySizeBytes, + CUpti_EventDomainID *domainArray); + +/** + * \brief Read an event domain attribute. + * + * Returns an event domain attribute in \p *value. The size of the \p + * value buffer is given by \p *valueSize. The value returned in \p + * *valueSize contains the number of bytes returned in \p value. + * + * If the attribute value is a c-string that is longer than \p + * *valueSize, then only the first \p *valueSize characters will be + * returned and there will be no terminating null byte. + * \note \b Thread-safety: this function is thread safe.
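+ * + * A minimal sketch that reads a domain's name, assuming \p domainId is a valid CUpti_EventDomainID (the buffer size is an illustrative choice; a longer name would be truncated as described above): + * \code + * char domainName[128]; + * size_t nameSize = sizeof(domainName); + * cuptiEventDomainGetAttribute(domainId, CUPTI_EVENT_DOMAIN_ATTR_NAME, &nameSize, domainName); + * \endcode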
+ * + * \param eventDomain ID of the event domain + * \param attrib The event domain attribute to read + * \param valueSize The size of the \p value buffer in bytes, and + * returns the number of bytes written to \p value + * \param value Returns the attribute's value + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not an event domain attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string + * attribute values, indicates that the \p value buffer is too small + * to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiEventDomainGetAttribute(CUpti_EventDomainID eventDomain, + CUpti_EventDomainAttribute attrib, + size_t *valueSize, + void *value); + +/** + * \brief Get number of events in a domain. + * + * Returns the number of events in \p numEvents for a domain. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventDomain ID of the event domain + * \param numEvents Returns the number of events in the domain + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL + */ +CUptiResult CUPTIAPI cuptiEventDomainGetNumEvents(CUpti_EventDomainID eventDomain, + uint32_t *numEvents); + +/** + * \brief Get the events in a domain. + * + * Returns the event IDs in \p eventArray for a domain. The size of + * the \p eventArray buffer is given by \p *arraySizeBytes. The size + * of the \p eventArray buffer must be at least \p numdomainevents * + * sizeof(CUpti_EventID) or else not all events will be returned. The + * value returned in \p *arraySizeBytes contains the number of bytes + * returned in \p eventArray. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventDomain ID of the event domain + * \param arraySizeBytes The size of \p eventArray in bytes, and + * returns the number of bytes written to \p eventArray + * \param eventArray Returns the IDs of the events in the domain + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or \p + * eventArray are NULL + */ +CUptiResult CUPTIAPI cuptiEventDomainEnumEvents(CUpti_EventDomainID eventDomain, + size_t *arraySizeBytes, + CUpti_EventID *eventArray); + +/** + * \brief Get an event attribute. + * + * Returns an event attribute in \p *value. The size of the \p + * value buffer is given by \p *valueSize. The value returned in \p + * *valueSize contains the number of bytes returned in \p value. + * + * If the attribute value is a c-string that is longer than \p + * *valueSize, then only the first \p *valueSize characters will be + * returned and there will be no terminating null byte. + * \note \b Thread-safety: this function is thread safe.
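+ * + * A minimal sketch that reads an event's name, assuming \p eventId is a valid CUpti_EventID (illustrative names and buffer size): + * \code + * char eventName[128]; + * size_t nameSize = sizeof(eventName); + * cuptiEventGetAttribute(eventId, CUPTI_EVENT_ATTR_NAME, &nameSize, eventName); + * \endcode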
+ * + * \param event ID of the event + * \param attrib The event attribute to read + * \param valueSize The size of the \p value buffer in bytes, and + * returns the number of bytes written to \p value + * \param value Returns the attribute's value + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not an event attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string + * attribute values, indicates that the \p value buffer is too small + * to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiEventGetAttribute(CUpti_EventID event, + CUpti_EventAttribute attrib, + size_t *valueSize, + void *value); + +/** + * \brief Find an event by name. + * + * Find an event by name and return the event ID in \p *event. + * \note \b Thread-safety: this function is thread safe. + * + * \param device The CUDA device + * \param eventName The name of the event to find + * \param event Returns the ID of the found event or undefined if + * unable to find the event + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_EVENT_NAME if unable to find an event + * with name \p eventName. In this case \p *event is undefined + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventName or \p event are NULL + */ +CUptiResult CUPTIAPI cuptiEventGetIdFromName(CUdevice device, + const char *eventName, + CUpti_EventID *event); + +/** + * \brief Create a new event group for a context. + * + * Creates a new event group for \p context and returns the new group + * in \p *eventGroup. + * \note \p flags are reserved for future use and should be set to zero. + * \note \b Thread-safety: this function is thread safe. + * + * \param context The context for the event group + * \param eventGroup Returns the new event group + * \param flags Reserved - must be zero + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_CONTEXT + * \retval CUPTI_ERROR_OUT_OF_MEMORY + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupCreate(CUcontext context, + CUpti_EventGroup *eventGroup, + uint32_t flags); + +/** + * \brief Destroy an event group. + * + * Destroy an \p eventGroup and free its resources. An event group + * cannot be destroyed if it is enabled. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroup The event group to destroy + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_OPERATION if the event group is enabled + * \retval CUPTI_ERROR_INVALID_PARAMETER if eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupDestroy(CUpti_EventGroup eventGroup); + +/** + * \brief Read an event group attribute. + * + * Read an event group attribute and return it in \p *value. + * \note \b Thread-safety: this function is thread safe but client + * must guard against simultaneous destruction or modification of \p + * eventGroup (for example, client must guard against simultaneous + * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent, + * etc.), and must guard against simultaneous destruction of the + * context in which \p eventGroup was created (for example, client + * must guard against simultaneous calls to cudaDeviceReset, + * cuCtxDestroy, etc.). 
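+ *
+ * A minimal usage sketch (illustrative only; error checking omitted):
+ * \code
+ *   uint32_t numEvents = 0;
+ *   size_t size = sizeof(numEvents);
+ *   cuptiEventGroupGetAttribute(eventGroup,
+ *                               CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
+ *                               &size, &numEvents);
+ * \endcode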
+ * + * \param eventGroup The event group + * \param attrib The attribute to read + * \param valueSize Size of the buffer pointed to by \p value, and + * returns the number of bytes written to \p value + * \param value Returns the value of the attribute + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not an event group attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string + * attribute values, indicates that the \p value buffer is too small + * to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiEventGroupGetAttribute(CUpti_EventGroup eventGroup, + CUpti_EventGroupAttribute attrib, + size_t *valueSize, + void *value); + +/** + * \brief Write an event group attribute. + * + * Write an event group attribute. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroup The event group + * \param attrib The attribute to write + * \param valueSize The size, in bytes, of the value + * \param value The attribute value to write + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not an event group attribute, or if + * \p attrib is not a writable attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT Indicates that + * the \p value buffer is too small to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiEventGroupSetAttribute(CUpti_EventGroup eventGroup, + CUpti_EventGroupAttribute attrib, + size_t valueSize, + void *value); + +/** + * \brief Add an event to an event group. + * + * Add an event to an event group. The event add can fail for a number of reasons: + * \li The event group is enabled + * \li The event does not belong to the same event domain as the + * events that are already in the event group + * \li Device limitations on the events that can belong to the same group + * \li The event group is full + * + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroup The event group + * \param event The event to add to the group + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_ID + * \retval CUPTI_ERROR_OUT_OF_MEMORY + * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled + * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p event belongs to a + * different event domain than the events already in \p eventGroup, or + * if a device limitation prevents \p event from being collected at + * the same time as the events already in \p eventGroup + * \retval CUPTI_ERROR_MAX_LIMIT_REACHED if \p eventGroup is full + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupAddEvent(CUpti_EventGroup eventGroup, + CUpti_EventID event); + +/** + * \brief Remove an event from an event group. + * + * Remove \p event from an event group. The event cannot be + * removed if the event group is enabled. + * \note \b Thread-safety: this function is thread safe.
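+ *
+ * A minimal sketch (illustrative only; error checking omitted):
+ * \code
+ *   cuptiEventGroupAddEvent(eventGroup, eventId);
+ *   // ... later, before the group is enabled ...
+ *   cuptiEventGroupRemoveEvent(eventGroup, eventId);
+ * \endcode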
+ * + * \param eventGroup The event group + * \param event The event to remove from the group + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_ID + * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupRemoveEvent(CUpti_EventGroup eventGroup, + CUpti_EventID event); + +/** + * \brief Remove all events from an event group. + * + * Remove all events from an event group. Events cannot be removed if + * the event group is enabled. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroup The event group + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupRemoveAllEvents(CUpti_EventGroup eventGroup); + +/** + * \brief Zero all the event counts in an event group. + * + * Zero all the event counts in an event group. + * \note \b Thread-safety: this function is thread safe but client + * must guard against simultaneous destruction or modification of \p + * eventGroup (for example, client must guard against simultaneous + * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent, + * etc.), and must guard against simultaneous destruction of the + * context in which \p eventGroup was created (for example, client + * must guard against simultaneous calls to cudaDeviceReset, + * cuCtxDestroy, etc.). + * + * \param eventGroup The event group + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupResetAllEvents(CUpti_EventGroup eventGroup); + +/** + * \brief Enable an event group. + * + * Enable an event group. Enabling an event group zeros the value of + * all the events in the group and then starts collection of those + * events. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroup The event group + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events + * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be + * enabled due to other already enabled event groups + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling + * and hardware is busy + */ +CUptiResult CUPTIAPI cuptiEventGroupEnable(CUpti_EventGroup eventGroup); + +/** + * \brief Disable an event group. + * + * Disable an event group. Disabling an event group stops collection + * of events contained in the group. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroup The event group + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupDisable(CUpti_EventGroup eventGroup); + +/** + * \brief Read the value for an event in an event group. + * + * Read the value for an event in an event group. The event value is + * returned in the \p eventValueBuffer buffer. \p + * eventValueBufferSizeBytes indicates the size of the \p + * eventValueBuffer buffer. 
The buffer must be at least sizeof(uint64) + * if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set + * on the group containing the event. The buffer must be at least + * (sizeof(uint64) * number of domain instances) if + * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is set on the + * group. + * + * If any instance of an event counter overflows, the value returned + * for that event instance will be ::CUPTI_EVENT_OVERFLOW. + * + * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE. + * + * Reading an event from a disabled event group is not allowed. After + * being read, an event's value is reset to zero. + * \note \b Thread-safety: this function is thread safe but client + * must guard against simultaneous destruction or modification of \p + * eventGroup (for example, client must guard against simultaneous + * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent, + * etc.), and must guard against simultaneous destruction of the + * context in which \p eventGroup was created (for example, client + * must guard against simultaneous calls to cudaDeviceReset, + * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is + * called simultaneously with this function, then returned event + * values are undefined. + * + * \param eventGroup The event group + * \param flags Flags controlling the reading mode + * \param event The event to read + * \param eventValueBufferSizeBytes The size of \p eventValueBuffer + * in bytes, and returns the number of bytes written to \p + * eventValueBuffer + * \param eventValueBuffer Returns the event value(s) + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_EVENT_ID + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p + * eventValueBufferSizeBytes or \p eventValueBuffer is NULL + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer + * is not sufficient + */ +CUptiResult CUPTIAPI cuptiEventGroupReadEvent(CUpti_EventGroup eventGroup, + CUpti_ReadEventFlags flags, + CUpti_EventID event, + size_t *eventValueBufferSizeBytes, + uint64_t *eventValueBuffer); + +/** + * \brief Read the values for all the events in an event group. + * + * Read the values for all the events in an event group. The event + * values are returned in the \p eventValueBuffer buffer. \p + * eventValueBufferSizeBytes indicates the size of \p + * eventValueBuffer. The buffer must be at least (sizeof(uint64) * + * number of events in group) if + * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set on + * the group containing the events. The buffer must be at least + * (sizeof(uint64) * number of domain instances * number of events in + * group) if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is + * set on the group. + * + * The data format returned in \p eventValueBuffer is: + * - domain instance 0: event0 event1 ... eventN + * - domain instance 1: event0 event1 ... eventN + * - ... + * - domain instance M: event0 event1 ... eventN + * + * The event order in \p eventValueBuffer is returned in \p + * eventIdArray. The size of \p eventIdArray is specified in \p + * eventIdArraySizeBytes. The size should be at least + * (sizeof(CUpti_EventID) * number of events in group). + * + * If any instance of any event counter overflows, the value returned + * for that event instance will be ::CUPTI_EVENT_OVERFLOW. 
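+ *
+ * For example (hypothetical numbers): a group containing 2 events on a
+ * domain with 4 instances, with
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES set, needs an
+ * \p eventValueBuffer of at least 4 * 2 * sizeof(uint64) = 64 bytes and
+ * an \p eventIdArray of at least 2 * sizeof(CUpti_EventID) bytes.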
+ * + * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE. + * + * Reading events from a disabled event group is not allowed. After + * being read, an event's value is reset to zero. + * \note \b Thread-safety: this function is thread safe but client + * must guard against simultaneous destruction or modification of \p + * eventGroup (for example, client must guard against simultaneous + * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent, + * etc.), and must guard against simultaneous destruction of the + * context in which \p eventGroup was created (for example, client + * must guard against simultaneous calls to cudaDeviceReset, + * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is + * called simultaneously with this function, then returned event + * values are undefined. + * + * \param eventGroup The event group + * \param flags Flags controlling the reading mode + * \param eventValueBufferSizeBytes The size of \p eventValueBuffer in + * bytes, and returns the number of bytes written to \p + * eventValueBuffer + * \param eventValueBuffer Returns the event values + * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes, + * and returns the number of bytes written to \p eventIdArray + * \param eventIdArray Returns the IDs of the events in the same order + * as the values returned in \p eventValueBuffer. + * \param numEventIdsRead Returns the number of event IDs returned + * in \p eventIdArray + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p + * eventValueBufferSizeBytes, \p eventValueBuffer, \p + * eventIdArraySizeBytes, \p eventIdArray or \p numEventIdsRead is + * NULL + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer + * or \p eventIdArray is not sufficient + */ +CUptiResult CUPTIAPI cuptiEventGroupReadAllEvents(CUpti_EventGroup eventGroup, + CUpti_ReadEventFlags flags, + size_t *eventValueBufferSizeBytes, + uint64_t *eventValueBuffer, + size_t *eventIdArraySizeBytes, + CUpti_EventID *eventIdArray, + size_t *numEventIdsRead); + +/** + * \brief For a set of events, get the grouping that indicates the + * number of passes and the event groups necessary to collect the + * events. + * + * The number of events that can be collected simultaneously varies by + * device and by the type of the events. When events can be collected + * simultaneously, they may need to be grouped into multiple event + * groups because they are from different event domains. This function + * takes a set of events and determines how many passes are required + * to collect all those events, and which events can be collected + * simultaneously in each pass. + * + * The CUpti_EventGroupSets returned in \p eventGroupPasses indicates + * how many passes are required to collect the events with the \p + * numSets field. Within each event group set, the \p sets array + * indicates the event groups that should be collected on each pass. + * \note \b Thread-safety: this function is thread safe, but client + * must guard against another thread simultaneously destroying \p + * context.
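+ *
+ * A minimal multi-pass collection sketch (illustrative only: the event IDs
+ * are assumed to have been looked up with \ref cuptiEventGetIdFromName,
+ * and error checking is omitted):
+ * \code
+ *   CUpti_EventID ids[2] = { id0, id1 };
+ *   CUpti_EventGroupSets *passes = NULL;
+ *   cuptiEventGroupSetsCreate(context, sizeof(ids), ids, &passes);
+ *   for (uint32_t i = 0; i < passes->numSets; i++) {
+ *     cuptiEventGroupSetEnable(&passes->sets[i]);
+ *     // ... run the workload and read the events for this pass ...
+ *     cuptiEventGroupSetDisable(&passes->sets[i]);
+ *   }
+ *   cuptiEventGroupSetsDestroy(passes);
+ * \endcode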
+ * + * \param context The context for event collection + * \param eventIdArraySizeBytes Size of \p eventIdArray in bytes + * \param eventIdArray Array of event IDs that need to be grouped + * \param eventGroupPasses Returns a CUpti_EventGroupSets object that + * indicates the number of passes required to collect the events and + * the events to collect on each pass + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_CONTEXT + * \retval CUPTI_ERROR_INVALID_EVENT_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArray or + * \p eventGroupPasses is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupSetsCreate(CUcontext context, + size_t eventIdArraySizeBytes, + CUpti_EventID *eventIdArray, + CUpti_EventGroupSets **eventGroupPasses); + +/** + * \brief Destroy an event group sets object. + * + * Destroy a CUpti_EventGroupSets object. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroupSets The object to destroy + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_OPERATION if any of the event groups + * contained in the sets is enabled + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSets is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupSetsDestroy(CUpti_EventGroupSets *eventGroupSets); + + +/** + * \brief Enable an event group set. + * + * Enable a set of event groups. Enabling a set of event groups zeros the value of + * all the events in all the groups and then starts collection of those events. + * \note \b Thread-safety: this function is thread safe. + * + * \param eventGroupSet The pointer to the event group set + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_NOT_READY if an event group in \p eventGroupSet does not contain any events + * \retval CUPTI_ERROR_NOT_COMPATIBLE if an event group in \p eventGroupSet cannot be + * enabled due to other already enabled event groups + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL + * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling and hardware is + * busy + */ +CUptiResult CUPTIAPI cuptiEventGroupSetEnable(CUpti_EventGroupSet *eventGroupSet); + +/** + * \brief Disable an event group set. + * + * Disable a set of event groups. Disabling a set of event groups + * stops collection of events contained in the groups. + * \note \b Thread-safety: this function is thread safe. + * \note \b If this call fails, some of the event groups in the set may be disabled + * and other event groups may remain enabled. + * + * \param eventGroupSet The pointer to the event group set + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_HARDWARE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL + */ +CUptiResult CUPTIAPI cuptiEventGroupSetDisable(CUpti_EventGroupSet *eventGroupSet); + +/** + * \brief Enable kernel replay mode. + * + * Set profiling mode for the context to replay mode. In this mode, + * any number of events can be collected in one run of the kernel. The + * event collection mode will automatically switch to + * CUPTI_EVENT_COLLECTION_MODE_KERNEL. In this mode, \ref + * cuptiSetEventCollectionMode will return + * CUPTI_ERROR_INVALID_OPERATION. + * \note \b Kernels might take longer to run if many events are enabled. + * \note \b Thread-safety: this function is thread safe.
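+ *
+ * A minimal sketch (illustrative only; error checking omitted):
+ * \code
+ *   cuptiEnableKernelReplayMode(context);
+ *   // enable the event groups, then launch each kernel once; CUPTI
+ *   // replays it as many times as needed to collect all enabled events
+ *   cuptiDisableKernelReplayMode(context);
+ * \endcode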
+ * + * \param context The context + * \retval CUPTI_SUCCESS + */ +CUptiResult CUPTIAPI cuptiEnableKernelReplayMode(CUcontext context); + +/** + * \brief Disable kernel replay mode. + * + * Set profiling mode for the context to non-replay (default) + * mode. Event collection mode will be set to + * CUPTI_EVENT_COLLECTION_MODE_KERNEL. All previously enabled + * event groups and event group sets will be disabled. + * \note \b Thread-safety: this function is thread safe. + * + * \param context The context + * \retval CUPTI_SUCCESS + */ +CUptiResult CUPTIAPI cuptiDisableKernelReplayMode(CUcontext context); + +/** + * \brief Function type for getting updates on kernel replay. + * + * \param kernelName The mangled kernel name + * \param numReplaysDone Number of replays done so far + * \param customData Pointer to any custom data passed in when subscribing + */ +typedef void (CUPTIAPI *CUpti_KernelReplayUpdateFunc)( + const char *kernelName, + int numReplaysDone, + void *customData); + +/** + * \brief Subscribe to kernel replay updates. + * + * When subscribed, the function pointer passed in will be called each time a + * kernel run is finished during kernel replay. A previously subscribed function + * pointer will be replaced. Passing in NULL as the function pointer + * unsubscribes from updates. + * + * \param updateFunc The update function pointer + * \param customData Pointer to any custom data + * \retval CUPTI_SUCCESS + */ +CUptiResult CUPTIAPI cuptiKernelReplaySubscribeUpdate(CUpti_KernelReplayUpdateFunc updateFunc, void *customData); + +/** @} */ /* END CUPTI_EVENT_API */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_EVENTS_H_*/ + + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h new file mode 100644 index 0000000000000000000000000000000000000000..28d441e6b51a1be18f22a018800316fda0a779ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h @@ -0,0 +1,825 @@ +/* + * Copyright 2011-2020 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(_CUPTI_METRIC_H_) +#define _CUPTI_METRIC_H_ + +#include <cuda.h> +#include <stdint.h> +#include <cupti_result.h> +#include <cupti_events.h> + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \defgroup CUPTI_METRIC_API CUPTI Metric API + * Functions, types, and enums that implement the CUPTI Metric API. + * + * \note The CUPTI metric APIs from the header cupti_metrics.h are not supported on devices + * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures). + * These APIs will be deprecated in a future CUDA release. They are replaced by the + * Profiling API in the header cupti_profiler_target.h and the Perfworks metrics API + * in the headers nvperf_host.h and nvperf_target.h, which are supported on + * devices with compute capability 7.0 and higher (i.e. Volta and later GPU + * architectures). + * + * @{ + */ + +/** + * \brief ID for a metric. + * + * A metric provides a measure of some aspect of the device. + */ +typedef uint32_t CUpti_MetricID; + +/** + * \brief A metric category. + * + * Each metric is assigned to a category that represents the general + * type of the metric. A metric's category is accessed using \ref + * cuptiMetricGetAttribute and the CUPTI_METRIC_ATTR_CATEGORY + * attribute. + */ +typedef enum { + /** + * A memory related metric. + */ + CUPTI_METRIC_CATEGORY_MEMORY = 0, + /** + * An instruction related metric. + */ + CUPTI_METRIC_CATEGORY_INSTRUCTION = 1, + /** + * A multiprocessor related metric. + */ + CUPTI_METRIC_CATEGORY_MULTIPROCESSOR = 2, + /** + * A cache related metric. + */ + CUPTI_METRIC_CATEGORY_CACHE = 3, + /** + * A texture related metric. + */ + CUPTI_METRIC_CATEGORY_TEXTURE = 4, + /** + * A Nvlink related metric. + */ + CUPTI_METRIC_CATEGORY_NVLINK = 5, + /** + * A PCIe related metric. + */ + CUPTI_METRIC_CATEGORY_PCIE = 6, + CUPTI_METRIC_CATEGORY_FORCE_INT = 0x7fffffff, +} CUpti_MetricCategory; + +/** + * \brief A metric evaluation mode.
+ * + * A metric can be evaluated per hardware instance to know the load balancing + * across instances of a domain or the metric can be evaluated in aggregate mode + * when the events involved in metric evaluation are from different event + * domains. It might be possible to evaluate some metrics in both + * modes for convenience. A metric's evaluation mode is accessed using \ref + * CUpti_MetricEvaluationMode and the CUPTI_METRIC_ATTR_EVALUATION_MODE + * attribute. + */ +typedef enum { + /** + * If this bit is set, the metric can be profiled for each instance of the + * domain. The event values passed to \ref cuptiMetricGetValue can contain + * values for one instance of the domain. And \ref cuptiMetricGetValue can + * be called for each instance. + */ + CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE = 1, + /** + * If this bit is set, the metric can be profiled over all instances. The + * event values passed to \ref cuptiMetricGetValue can be aggregated values + * of events for all instances of the domain. + */ + CUPTI_METRIC_EVALUATION_MODE_AGGREGATE = 1 << 1, + CUPTI_METRIC_EVALUATION_MODE_FORCE_INT = 0x7fffffff, +} CUpti_MetricEvaluationMode; + +/** + * \brief Kinds of metric values. + * + * Metric values can be one of several different kinds. Corresponding + * to each kind is a member of the CUpti_MetricValue union. The metric + * value returned by \ref cuptiMetricGetValue should be accessed using + * the appropriate member of that union based on its value kind. + */ +typedef enum { + /** + * The metric value is a 64-bit double. + */ + CUPTI_METRIC_VALUE_KIND_DOUBLE = 0, + /** + * The metric value is a 64-bit unsigned integer. + */ + CUPTI_METRIC_VALUE_KIND_UINT64 = 1, + /** + * The metric value is a percentage represented by a 64-bit + * double. For example, 57.5% is represented by the value 57.5. + */ + CUPTI_METRIC_VALUE_KIND_PERCENT = 2, + /** + * The metric value is a throughput represented by a 64-bit + * integer. The unit for throughput values is bytes/second. + */ + CUPTI_METRIC_VALUE_KIND_THROUGHPUT = 3, + /** + * The metric value is a 64-bit signed integer. + */ + CUPTI_METRIC_VALUE_KIND_INT64 = 4, + /** + * The metric value is a utilization level, as represented by + * CUpti_MetricValueUtilizationLevel. + */ + CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL = 5, + + CUPTI_METRIC_VALUE_KIND_FORCE_INT = 0x7fffffff +} CUpti_MetricValueKind; + +/** + * \brief Enumeration of utilization levels for metrics values of kind + * CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL. Utilization values can + * vary from IDLE (0) to MAX (10) but the enumeration only provides + * specific names for a few values. + */ +typedef enum { + CUPTI_METRIC_VALUE_UTILIZATION_IDLE = 0, + CUPTI_METRIC_VALUE_UTILIZATION_LOW = 2, + CUPTI_METRIC_VALUE_UTILIZATION_MID = 5, + CUPTI_METRIC_VALUE_UTILIZATION_HIGH = 8, + CUPTI_METRIC_VALUE_UTILIZATION_MAX = 10, + CUPTI_METRIC_VALUE_UTILIZATION_FORCE_INT = 0x7fffffff +} CUpti_MetricValueUtilizationLevel; + +/** + * \brief Metric attributes. + * + * Metric attributes describe properties of a metric. These attributes + * can be read using \ref cuptiMetricGetAttribute. + */ +typedef enum { + /** + * Metric name. Value is a null terminated const c-string. + */ + CUPTI_METRIC_ATTR_NAME = 0, + /** + * Short description of metric. Value is a null terminated const c-string. + */ + CUPTI_METRIC_ATTR_SHORT_DESCRIPTION = 1, + /** + * Long description of metric. Value is a null terminated const c-string. + */ + CUPTI_METRIC_ATTR_LONG_DESCRIPTION = 2, + /** + * Category of the metric. 
Value is of type CUpti_MetricCategory. + */ + CUPTI_METRIC_ATTR_CATEGORY = 3, + /** + * Value type of the metric. Value is of type CUpti_MetricValueKind. + */ + CUPTI_METRIC_ATTR_VALUE_KIND = 4, + /** + * Metric evaluation mode. Value is of type CUpti_MetricEvaluationMode. + */ + CUPTI_METRIC_ATTR_EVALUATION_MODE = 5, + CUPTI_METRIC_ATTR_FORCE_INT = 0x7fffffff, +} CUpti_MetricAttribute; + +/** + * \brief A metric value. + * + * Metric values can be one of several different kinds. Corresponding + * to each kind is a member of the CUpti_MetricValue union. The metric + * value returned by \ref cuptiMetricGetValue should be accessed using + * the appropriate member of that union based on its value kind. + */ +typedef union { + /* + * Value for CUPTI_METRIC_VALUE_KIND_DOUBLE. + */ + double metricValueDouble; + /* + * Value for CUPTI_METRIC_VALUE_KIND_UINT64. + */ + uint64_t metricValueUint64; + /* + * Value for CUPTI_METRIC_VALUE_KIND_INT64. + */ + int64_t metricValueInt64; + /* + * Value for CUPTI_METRIC_VALUE_KIND_PERCENT. For example, 57.5% is + * represented by the value 57.5. + */ + double metricValuePercent; + /* + * Value for CUPTI_METRIC_VALUE_KIND_THROUGHPUT. The unit for + * throughput values is bytes/second. + */ + uint64_t metricValueThroughput; + /* + * Value for CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL. + */ + CUpti_MetricValueUtilizationLevel metricValueUtilizationLevel; +} CUpti_MetricValue; + +/** + * \brief Device class. + * + * Enumeration of device classes for metric property + * CUPTI_METRIC_PROPERTY_DEVICE_CLASS. + */ +typedef enum { + CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TESLA = 0, + CUPTI_METRIC_PROPERTY_DEVICE_CLASS_QUADRO = 1, + CUPTI_METRIC_PROPERTY_DEVICE_CLASS_GEFORCE = 2, + CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TEGRA = 3, +} CUpti_MetricPropertyDeviceClass; + +/** + * \brief Metric device properties. + * + * Metric device properties describe device properties which are needed for a metric. + * Some of these properties can be collected using cuDeviceGetAttribute. + */ +typedef enum { + /* + * Number of multiprocessors on a device. This can be collected + * using value of \param CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT of + * cuDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_MULTIPROCESSOR_COUNT, + /* + * Maximum number of warps on a multiprocessor. This can be + * collected using ratio of value of \param + * CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR and \param + * CU_DEVICE_ATTRIBUTE_WARP_SIZE of cuDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_WARPS_PER_MULTIPROCESSOR, + /* + * GPU Time for kernel in ns. This should be profiled using CUPTI + * Activity API. + */ + CUPTI_METRIC_PROPERTY_KERNEL_GPU_TIME, + /* + * Clock rate for device in KHz. This should be collected using + * value of \param CU_DEVICE_ATTRIBUTE_CLOCK_RATE of + * cuDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_CLOCK_RATE, + /* + * Number of Frame buffer units for device. This should be collected + * using value of \param CUPTI_DEVICE_ATTRIBUTE_MAX_FRAME_BUFFERS of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_FRAME_BUFFER_COUNT, + /* + * Global memory bandwidth in KBytes/sec. This should be collected + * using value of \param CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH + * of cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_GLOBAL_MEMORY_BANDWIDTH, + /* + * PCIE link rate in Mega bits/sec. This should be collected using + * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_RATE of + * cuptiDeviceGetAttribute. 
+ */ + CUPTI_METRIC_PROPERTY_PCIE_LINK_RATE, + /* + * PCIE link width for device. This should be collected using + * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_PCIE_LINK_WIDTH, + /* + * PCIE generation for device. This should be collected using + * value of \param CUPTI_DEVICE_ATTR_PCIE_GEN of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_PCIE_GEN, + /* + * The device class. This should be collected using + * value of \param CUPTI_DEVICE_ATTR_DEVICE_CLASS of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_DEVICE_CLASS, + /* + * Peak single precision floating point operations that + * can be performed in one cycle by the device. + * This should be collected using value of + * \param CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_FLOP_SP_PER_CYCLE, + /* + * Peak double precision floating point operations that + * can be performed in one cycle by the device. + * This should be collected using value of + * \param CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_FLOP_DP_PER_CYCLE, + /* + * Number of L2 units on a device. This can be collected + * using value of \param CUPTI_DEVICE_ATTR_MAX_L2_UNITS of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_L2_UNITS, + /* + * Whether ECC support is enabled on the device. This can be + * collected using value of \param CU_DEVICE_ATTRIBUTE_ECC_ENABLED of + * cuDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_ECC_ENABLED, + /* + * Peak half precision floating point operations that + * can be performed in one cycle by the device. + * This should be collected using value of + * \param CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_FLOP_HP_PER_CYCLE, + /* + * NVLINK bandwidth for device. This should be collected + * using value of \param CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW of + * cuptiDeviceGetAttribute. + */ + CUPTI_METRIC_PROPERTY_GPU_CPU_NVLINK_BANDWIDTH, +} CUpti_MetricPropertyID; + +/** + * \brief Get the total number of metrics available on any device. + * + * Returns the total number of metrics available on any CUDA-capable + * device. + * + * \param numMetrics Returns the number of metrics + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL +*/ +CUptiResult CUPTIAPI cuptiGetNumMetrics(uint32_t *numMetrics); + +/** + * \brief Get all the metrics available on any device. + * + * Returns the metric IDs in \p metricArray for all CUDA-capable + * devices. The size of the \p metricArray buffer is given by \p + * *arraySizeBytes. The size of the \p metricArray buffer must be at + * least \p numMetrics * sizeof(CUpti_MetricID) or all metric IDs will + * not be returned. The value returned in \p *arraySizeBytes contains + * the number of bytes returned in \p metricArray. + * + * \param arraySizeBytes The size of \p metricArray in bytes, and + * returns the number of bytes written to \p metricArray + * \param metricArray Returns the IDs of the metrics + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or + * \p metricArray are NULL +*/ +CUptiResult CUPTIAPI cuptiEnumMetrics(size_t *arraySizeBytes, + CUpti_MetricID *metricArray); + +/** + * \brief Get the number of metrics for a device. + * + * Returns the number of metrics available for a device.
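+ *
+ * A typical pairing with \ref cuptiDeviceEnumMetrics (illustrative only;
+ * error checking and the malloc result check are omitted):
+ * \code
+ *   uint32_t numMetrics = 0;
+ *   cuptiDeviceGetNumMetrics(device, &numMetrics);
+ *   size_t size = numMetrics * sizeof(CUpti_MetricID);
+ *   CUpti_MetricID *ids = (CUpti_MetricID *)malloc(size);
+ *   cuptiDeviceEnumMetrics(device, &size, ids);
+ * \endcode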
+ * + * \param device The CUDA device + * \param numMetrics Returns the number of metrics available for the + * device + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL + */ +CUptiResult CUPTIAPI cuptiDeviceGetNumMetrics(CUdevice device, + uint32_t *numMetrics); + +/** + * \brief Get the metrics for a device. + * + * Returns the metric IDs in \p metricArray for a device. The size of + * the \p metricArray buffer is given by \p *arraySizeBytes. The size + * of the \p metricArray buffer must be at least \p numMetrics * + * sizeof(CUpti_MetricID) or else all metric IDs will not be + * returned. The value returned in \p *arraySizeBytes contains the + * number of bytes returned in \p metricArray. + * + * \param device The CUDA device + * \param arraySizeBytes The size of \p metricArray in bytes, and + * returns the number of bytes written to \p metricArray + * \param metricArray Returns the IDs of the metrics for the device + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or + * \p metricArray are NULL + */ +CUptiResult CUPTIAPI cuptiDeviceEnumMetrics(CUdevice device, + size_t *arraySizeBytes, + CUpti_MetricID *metricArray); + +/** + * \brief Get a metric attribute. + * + * Returns a metric attribute in \p *value. The size of the \p + * value buffer is given by \p *valueSize. The value returned in \p + * *valueSize contains the number of bytes returned in \p value. + * + * If the attribute value is a c-string that is longer than \p + * *valueSize, then only the first \p *valueSize characters will be + * returned and there will be no terminating null byte. + * + * \param metric ID of the metric + * \param attrib The metric attribute to read + * \param valueSize The size of the \p value buffer in bytes, and + * returns the number of bytes written to \p value + * \param value Returns the attribute's value + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value + * is NULL, or if \p attrib is not a metric attribute + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string + * attribute values, indicates that the \p value buffer is too small + * to hold the attribute value. + */ +CUptiResult CUPTIAPI cuptiMetricGetAttribute(CUpti_MetricID metric, + CUpti_MetricAttribute attrib, + size_t *valueSize, + void *value); + +/** + * \brief Find a metric by name. + * + * Find a metric by name and return the metric ID in \p *metric. + * + * \param device The CUDA device + * \param metricName The name of the metric to find + * \param metric Returns the ID of the found metric or undefined if + * unable to find the metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_DEVICE + * \retval CUPTI_ERROR_INVALID_METRIC_NAME if unable to find a metric + * with name \p metricName. In this case \p *metric is undefined + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricName or \p + * metric are NULL. + */ +CUptiResult CUPTIAPI cuptiMetricGetIdFromName(CUdevice device, + const char *metricName, + CUpti_MetricID *metric); + +/** + * \brief Get number of events required to calculate a metric. + * + * Returns the number of events in \p numEvents that are required to + * calculate a metric.
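+ *
+ * A typical pairing with \ref cuptiMetricEnumEvents (illustrative only;
+ * error checking and the malloc result check are omitted):
+ * \code
+ *   uint32_t numEvents = 0;
+ *   cuptiMetricGetNumEvents(metric, &numEvents);
+ *   size_t size = numEvents * sizeof(CUpti_EventID);
+ *   CUpti_EventID *events = (CUpti_EventID *)malloc(size);
+ *   cuptiMetricEnumEvents(metric, &size, events);
+ * \endcode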
+ * + * \param metric ID of the metric + * \param numEvents Returns the number of events required for the metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL + */ +CUptiResult CUPTIAPI cuptiMetricGetNumEvents(CUpti_MetricID metric, + uint32_t *numEvents); + +/** + * \brief Get the events required to calculate a metric. + * + * Gets the event IDs in \p eventIdArray required to calculate a \p + * metric. The size of the \p eventIdArray buffer is given by \p + * *eventIdArraySizeBytes and must be at least \p numEvents * + * sizeof(CUpti_EventID) or all events will not be returned. The value + * returned in \p *eventIdArraySizeBytes contains the number of bytes + * returned in \p eventIdArray. + * + * \param metric ID of the metric + * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes, + * and returns the number of bytes written to \p eventIdArray + * \param eventIdArray Returns the IDs of the events required to + * calculate \p metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArraySizeBytes or \p + * eventIdArray are NULL. + */ +CUptiResult CUPTIAPI cuptiMetricEnumEvents(CUpti_MetricID metric, + size_t *eventIdArraySizeBytes, + CUpti_EventID *eventIdArray); + +/** + * \brief Get number of properties required to calculate a metric. + * + * Returns the number of properties in \p numProp that are required to + * calculate a metric. + * + * \param metric ID of the metric + * \param numProp Returns the number of properties required for the + * metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numProp is NULL + */ +CUptiResult CUPTIAPI cuptiMetricGetNumProperties(CUpti_MetricID metric, + uint32_t *numProp); + +/** + * \brief Get the properties required to calculate a metric. + * + * Gets the property IDs in \p propIdArray required to calculate a \p + * metric. The size of the \p propIdArray buffer is given by \p + * *propIdArraySizeBytes and must be at least \p numProp * + * sizeof(CUpti_DeviceAttribute) or all properties will not be + * returned. The value returned in \p *propIdArraySizeBytes contains + * the number of bytes returned in \p propIdArray. + * + * \param metric ID of the metric + * \param propIdArraySizeBytes The size of \p propIdArray in bytes, + * and returns the number of bytes written to \p propIdArray + * \param propIdArray Returns the IDs of the properties required to + * calculate \p metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p propIdArraySizeBytes or \p + * propIdArray are NULL. + */ +CUptiResult CUPTIAPI cuptiMetricEnumProperties(CUpti_MetricID metric, + size_t *propIdArraySizeBytes, + CUpti_MetricPropertyID *propIdArray); + + +/** + * \brief For a metric get the groups of events that must be collected + * in the same pass. + * + * For a metric get the groups of events that must be collected in the + * same pass to ensure that the metric is calculated correctly. If the + * events are not collected as specified then the metric value may be + * inaccurate. + * + * The function returns NULL if a metric does not have any required + * event group.
In this case the events needed for the metric can be + * grouped in any manner for collection. + * + * \param context The context for event collection + * \param metric The metric ID + * \param eventGroupSets Returns a CUpti_EventGroupSets object that + * indicates the events that must be collected in the same pass to + * ensure the metric is calculated correctly. Returns NULL if no + * grouping is required for metric + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + */ +CUptiResult CUPTIAPI cuptiMetricGetRequiredEventGroupSets(CUcontext context, + CUpti_MetricID metric, + CUpti_EventGroupSets **eventGroupSets); + +/** + * \brief For a set of metrics, get the grouping that indicates the + * number of passes and the event groups necessary to collect the + * events required for those metrics. + * + * For a set of metrics, get the grouping that indicates the number of + * passes and the event groups necessary to collect the events + * required for those metrics. + * + * \see cuptiEventGroupSetsCreate for details on event group set + * creation. + * + * \param context The context for event collection + * \param metricIdArraySizeBytes Size of the metricIdArray in bytes + * \param metricIdArray Array of metric IDs + * \param eventGroupPasses Returns a CUpti_EventGroupSets object that + * indicates the number of passes required to collect the events and + * the events to collect on each pass + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_CONTEXT + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricIdArray or + * \p eventGroupPasses is NULL + */ +CUptiResult CUPTIAPI cuptiMetricCreateEventGroupSets(CUcontext context, + size_t metricIdArraySizeBytes, + CUpti_MetricID *metricIdArray, + CUpti_EventGroupSets **eventGroupPasses); + +/** + * \brief Calculate the value for a metric. + * + * Use the events collected for a metric to calculate the metric + * value. Metric value evaluation depends on the evaluation mode + * \ref CUpti_MetricEvaluationMode that the metric supports. + * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE, + * then it assumes that the input event value is for one domain instance. + * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE, + * it assumes that input event values are + * normalized to represent all domain instances on a device. For the + * most accurate metric collection, the events required for the metric + * should be collected for all profiled domain instances. For example, + * to collect all instances of an event, set the + * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on + * the group containing the event to 1. The normalized value for the + * event is then: (\p sum_event_values * \p totalInstanceCount) / \p + * instanceCount, where \p sum_event_values is the summation of the + * event values across all profiled domain instances, \p + * totalInstanceCount is obtained from querying + * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount + * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or + * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT). 
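+ *
+ * For example (hypothetical numbers): if an event was profiled on 2 of a
+ * domain's 4 instances and the profiled instances counted 100 and 140,
+ * the normalized value to pass to this function is
+ * (100 + 140) * 4 / 2 = 480.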
+ * + * \param device The CUDA device that the metric is being calculated for + * \param metric The metric ID + * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes + * \param eventIdArray The event IDs required to calculate \p metric + * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes + * \param eventValueArray The normalized event values required to + * calculate \p metric. The values must be ordered to match the order of + * events in \p eventIdArray + * \param timeDuration The duration over which the events were + * collected, in ns + * \param metricValue Returns the value for the metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_OPERATION + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the + * eventIdArray does not contain all the events needed for metric + * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the + * event values required for the metric is CUPTI_EVENT_OVERFLOW + * \retval CUPTI_ERROR_INVALID_METRIC_VALUE if the computed metric value + * cannot be represented in the metric's value type. For example, + * if the metric value type is unsigned and the computed metric value is negative + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue, + * \p eventIdArray or \p eventValueArray is NULL + */ +CUptiResult CUPTIAPI cuptiMetricGetValue(CUdevice device, + CUpti_MetricID metric, + size_t eventIdArraySizeBytes, + CUpti_EventID *eventIdArray, + size_t eventValueArraySizeBytes, + uint64_t *eventValueArray, + uint64_t timeDuration, + CUpti_MetricValue *metricValue); + +/** + * \brief Calculate the value for a metric. + * + * Use the events and properties collected for a metric to calculate + * the metric value. Metric value evaluation depends on the evaluation + * mode \ref CUpti_MetricEvaluationMode that the metric supports. If + * a metric has evaluation mode as + * CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE, then it assumes that the + * input event value is for one domain instance. If a metric has + * evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE, it + * assumes that input event values are normalized to represent all + * domain instances on a device. For the most accurate metric + * collection, the events required for the metric should be collected + * for all profiled domain instances. For example, to collect all + * instances of an event, set the + * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on + * the group containing the event to 1. The normalized value for the + * event is then: (\p sum_event_values * \p totalInstanceCount) / \p + * instanceCount, where \p sum_event_values is the summation of the + * event values across all profiled domain instances, \p + * totalInstanceCount is obtained from querying + * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount + * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or + * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT). + * + * \param metric The metric ID + * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes + * \param eventIdArray The event IDs required to calculate \p metric + * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes + * \param eventValueArray The normalized event values required to + * calculate \p metric.
The values must be ordered to match the order of + * events in \p eventIdArray + * \param propIdArraySizeBytes The size of \p propIdArray in bytes + * \param propIdArray The metric property IDs required to calculate \p metric + * \param propValueArraySizeBytes The size of \p propValueArray in bytes + * \param propValueArray The metric property values required to + * calculate \p metric. The values must be ordered to match the order of + * metric properties in \p propIdArray + * \param metricValue Returns the value for the metric + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_NOT_INITIALIZED + * \retval CUPTI_ERROR_INVALID_METRIC_ID + * \retval CUPTI_ERROR_INVALID_OPERATION + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the + * eventIdArray does not contain all the events needed for metric + * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the + * event values required for the metric is CUPTI_EVENT_OVERFLOW + * \retval CUPTI_ERROR_NOT_COMPATIBLE if the computed metric value + * cannot be represented in the metric's value type. For example, + * if the metric value type is unsigned and the computed metric value is negative + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue, + * \p eventIdArray or \p eventValueArray is NULL + */ +CUptiResult CUPTIAPI cuptiMetricGetValue2(CUpti_MetricID metric, + size_t eventIdArraySizeBytes, + CUpti_EventID *eventIdArray, + size_t eventValueArraySizeBytes, + uint64_t *eventValueArray, + size_t propIdArraySizeBytes, + CUpti_MetricPropertyID *propIdArray, + size_t propValueArraySizeBytes, + uint64_t *propValueArray, + CUpti_MetricValue *metricValue); + +/** @} */ /* END CUPTI_METRIC_API */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_METRIC_H_*/ + + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h new file mode 100644 index 0000000000000000000000000000000000000000..5ad8c85e6e674b9a016580be88d3c5a2d2619990 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h @@ -0,0 +1,111 @@ +/* + * Copyright 2013-2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +typedef enum { + CUPTI_CBID_NVTX_INVALID = 0, + CUPTI_CBID_NVTX_nvtxMarkA = 1, + CUPTI_CBID_NVTX_nvtxMarkW = 2, + CUPTI_CBID_NVTX_nvtxMarkEx = 3, + CUPTI_CBID_NVTX_nvtxRangeStartA = 4, + CUPTI_CBID_NVTX_nvtxRangeStartW = 5, + CUPTI_CBID_NVTX_nvtxRangeStartEx = 6, + CUPTI_CBID_NVTX_nvtxRangeEnd = 7, + CUPTI_CBID_NVTX_nvtxRangePushA = 8, + CUPTI_CBID_NVTX_nvtxRangePushW = 9, + CUPTI_CBID_NVTX_nvtxRangePushEx = 10, + CUPTI_CBID_NVTX_nvtxRangePop = 11, + CUPTI_CBID_NVTX_nvtxNameCategoryA = 12, + CUPTI_CBID_NVTX_nvtxNameCategoryW = 13, + CUPTI_CBID_NVTX_nvtxNameOsThreadA = 14, + CUPTI_CBID_NVTX_nvtxNameOsThreadW = 15, + CUPTI_CBID_NVTX_nvtxNameCuDeviceA = 16, + CUPTI_CBID_NVTX_nvtxNameCuDeviceW = 17, + CUPTI_CBID_NVTX_nvtxNameCuContextA = 18, + CUPTI_CBID_NVTX_nvtxNameCuContextW = 19, + CUPTI_CBID_NVTX_nvtxNameCuStreamA = 20, + CUPTI_CBID_NVTX_nvtxNameCuStreamW = 21, + CUPTI_CBID_NVTX_nvtxNameCuEventA = 22, + CUPTI_CBID_NVTX_nvtxNameCuEventW = 23, + CUPTI_CBID_NVTX_nvtxNameCudaDeviceA = 24, + CUPTI_CBID_NVTX_nvtxNameCudaDeviceW = 25, + CUPTI_CBID_NVTX_nvtxNameCudaStreamA = 26, + CUPTI_CBID_NVTX_nvtxNameCudaStreamW = 27, + CUPTI_CBID_NVTX_nvtxNameCudaEventA = 28, + CUPTI_CBID_NVTX_nvtxNameCudaEventW = 29, + CUPTI_CBID_NVTX_nvtxDomainMarkEx = 30, + CUPTI_CBID_NVTX_nvtxDomainRangeStartEx = 31, + CUPTI_CBID_NVTX_nvtxDomainRangeEnd = 32, + CUPTI_CBID_NVTX_nvtxDomainRangePushEx = 33, + CUPTI_CBID_NVTX_nvtxDomainRangePop = 34, + CUPTI_CBID_NVTX_nvtxDomainResourceCreate = 35, + CUPTI_CBID_NVTX_nvtxDomainResourceDestroy = 36, + CUPTI_CBID_NVTX_nvtxDomainNameCategoryA = 37, + CUPTI_CBID_NVTX_nvtxDomainNameCategoryW = 38, + CUPTI_CBID_NVTX_nvtxDomainRegisterStringA = 39, + CUPTI_CBID_NVTX_nvtxDomainRegisterStringW = 40, + CUPTI_CBID_NVTX_nvtxDomainCreateA = 41, + CUPTI_CBID_NVTX_nvtxDomainCreateW = 42, + CUPTI_CBID_NVTX_nvtxDomainDestroy = 43, + CUPTI_CBID_NVTX_nvtxDomainSyncUserCreate = 44, + CUPTI_CBID_NVTX_nvtxDomainSyncUserDestroy = 45, + CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireStart = 46, + CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireFailed = 47, + CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireSuccess = 48, + CUPTI_CBID_NVTX_nvtxDomainSyncUserReleasing = 49, + 
CUPTI_CBID_NVTX_SIZE, + CUPTI_CBID_NVTX_FORCE_INT = 0x7fffffff +} CUpti_nvtx_api_trace_cbid; + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h new file mode 100644 index 0000000000000000000000000000000000000000..86c5c0ed9a6c43e0555595718809a0d58aca722c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h @@ -0,0 +1,950 @@ +/* + * Copyright 2020-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(_CUPTI_PCSAMPLING_H_) +#define _CUPTI_PCSAMPLING_H_ + +#include <cuda.h> +#include <stdint.h> +#include <stddef.h> +#include "cupti_result.h" + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#define ACTIVITY_RECORD_ALIGNMENT 8 +#if defined(_WIN32) // Windows 32- and 64-bit +#define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding +#define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT)) +#define END_PACKED_ALIGNMENT __pragma(pack(pop)) +#elif defined(__GNUC__) // GCC +#define START_PACKED_ALIGNMENT +#define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT))) +#define END_PACKED_ALIGNMENT +#else // all other compilers +#define START_PACKED_ALIGNMENT +#define PACKED_ALIGNMENT +#define END_PACKED_ALIGNMENT +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \defgroup CUPTI_PCSAMPLING_API CUPTI PC Sampling API + * Functions, types, and enums that implement the CUPTI PC Sampling API. + * @{ + */ + +#ifndef CUPTI_PCSAMPLING_STRUCT_SIZE +#define CUPTI_PCSAMPLING_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_)) +#endif + +#ifndef CUPTI_STALL_REASON_STRING_SIZE +#define CUPTI_STALL_REASON_STRING_SIZE 128 +#endif + +/** + * \brief PC Sampling collection mode + */ +typedef enum +{ + /** + * INVALID Value + */ + CUPTI_PC_SAMPLING_COLLECTION_MODE_INVALID = 0, + /** + * Continuous mode. Kernels are not serialized in this mode. + */ + CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS = 1, + /** + * Serialized mode. Kernels are serialized in this mode. + */ + CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED = 2, +} CUpti_PCSamplingCollectionMode; + +/** + * \brief PC Sampling stall reasons + */ +typedef struct PACKED_ALIGNMENT +{ + /** + * [r] Collected stall reason index + */ + uint32_t pcSamplingStallReasonIndex; + /** + * [r] Number of times the PC was sampled with the stallReason. + */ + uint32_t samples; +} CUpti_PCSamplingStallReason; + +/** + * \brief PC Sampling data + */ +typedef struct PACKED_ALIGNMENT +{ + /** + * [w] Size of the data structure. + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [r] Unique cubin id + */ + uint64_t cubinCrc; + /** + * [r] PC offset + */ + uint64_t pcOffset; + /** + * The function's unique symbol index in the module. + */ + uint32_t functionIndex; + /** + * Padding + */ + uint32_t pad; + /** + * [r] The function name. This name string might be shared across all the records + * including records from activity APIs representing the same function, and so it should not be + * modified or freed until post processing of all the records is done. Once done, it is the user's responsibility to + * free the memory using the free() function.
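+ * + * A hedged cleanup sketch (illustrative only; pcData stands in for a fully post-processed record and is not a name from this header): + * \code + * // Free the CUPTI-allocated name exactly once, after every record + * // sharing this string has been processed. + * free(pcData->functionName); + * pcData->functionName = NULL; + * \endcode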
+ */ + char* functionName; + /** + * [r] Collected stall reason count + */ + size_t stallReasonCount; + /** + * [r] Stall reason id + * Total samples + */ + CUpti_PCSamplingStallReason *stallReason; +} CUpti_PCSamplingPCData; + +/** + * \brief PC Sampling output data format + */ +typedef enum +{ + CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_INVALID = 0, + /** + * HW buffer data will be parsed during collection of data + */ + CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED = 1, +} CUpti_PCSamplingOutputDataFormat; + +/** + * \brief Collected PC Sampling data + * + */ +typedef struct PACKED_ALIGNMENT +{ + /** + * [w] Size of the data structure. + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Number of PCs to be collected + */ + size_t collectNumPcs; + /** + * [r] Number of samples collected across all PCs. + * It includes samples for user modules, samples for non-user kernels and dropped samples. + * It includes counts for all non selected stall reasons. + * CUPTI does not provide PC records for non-user kernels. + * CUPTI does not provide PC records for instructions for which all selected stall reason metrics counts are zero. + */ + uint64_t totalSamples; + /** + * [r] Number of samples that were dropped by hardware due to backpressure/overflow. + */ + uint64_t droppedSamples; + /** + * [r] Number of PCs collected + */ + size_t totalNumPcs; + /** + * [r] Number of PCs available for collection + */ + size_t remainingNumPcs; + /** + * [r] Unique identifier for each range. + * Data collected across multiple ranges in multiple buffers can be identified using range id. + */ + uint64_t rangeId; + /** + * [r] Profiled PC data + * This data struct should have enough memory to collect number of PCs mentioned in \brief collectNumPcs + */ + CUpti_PCSamplingPCData *pPcData; + /** + * [r] Number of samples collected across all non user kernels PCs. + * It includes samples for non-user kernels. + * It includes counts for all non selected stall reasons as well. + * CUPTI does not provide PC records for non-user kernels. + */ + uint64_t nonUsrKernelsTotalSamples; + + /** + * [r] Status of the hardware buffer. + * CUPTI returns the error code CUPTI_ERROR_OUT_OF_MEMORY when hardware buffer is full. + * When hardware buffer is full, user will get pc data as 0. To mitigate this issue, one or more of the below options can be tried: + * 1. Increase the hardware buffer size using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE + * 2. Decrease the thread sleep span using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN + * 3. Decrease the sampling frequency using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD + */ + uint8_t hardwareBufferFull; +} CUpti_PCSamplingData; + +/** + * \brief PC Sampling configuration attributes + * + * PC Sampling configuration attribute types. These attributes can be read + * using \ref cuptiPCSamplingGetConfigurationAttribute and can be written + * using \ref cuptiPCSamplingSetConfigurationAttribute. 
Attributes marked + * [r] can only be read using \ref cuptiPCSamplingGetConfigurationAttribute + * [w] can only be written using \ref cuptiPCSamplingSetConfigurationAttribute + * [rw] can be read using \ref cuptiPCSamplingGetConfigurationAttribute and + * written using \ref cuptiPCSamplingSetConfigurationAttribute + */ +typedef enum +{ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_INVALID = 0, + /** + * [rw] Sampling period for PC Sampling. + * DEFAULT - CUPTI defined value based on number of SMs + * Valid values for the sampling period are between 5 and 31, both inclusive. This will set the + * sampling period to (2^samplingPeriod) cycles. + * E.g., for sampling period = 5 to 31, cycles = 32, 64, 128, ..., 2^31 + * Value is a uint32_t + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD = 1, + /** + * [w] Number of stall reasons to collect. + * DEFAULT - All stall reasons will be collected + * Value is a size_t + * [w] Stall reasons to collect + * DEFAULT - All stall reasons will be collected + * Input value should be a pointer to an array containing all the + * stall reason indexes to collect. + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON = 2, + /** + * [rw] Size of SW buffer for raw PC counter data downloaded from the HW buffer + * DEFAULT - 1 MB, which can accommodate approximately 5500 PCs + * with all stall reasons + * It takes approximately 16 bytes (plus some fixed-size memory) + * to accommodate one PC with one stall reason + * E.g., 1 PC with 1 stall reason = 32 bytes + * 1 PC with 2 stall reasons = 48 bytes + * 1 PC with 4 stall reasons = 96 bytes + * Value is a size_t + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE = 3, + /** + * [rw] Size of HW buffer in bytes + * DEFAULT - 512 MB + * If the sampling period is too low, the HW buffer can overflow + * and drop PC data + * Value is a size_t + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE = 4, + /** + * [rw] PC Sampling collection mode + * DEFAULT - CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS + * Input value should be of type \ref CUpti_PCSamplingCollectionMode. + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE = 5, + /** + * [rw] Control over PC Sampling data collection range + * Default - 0 + * 1 - Allows the user to start and stop PC Sampling using the APIs - + * \ref cuptiPCSamplingStart() - Start PC Sampling + * \ref cuptiPCSamplingStop() - Stop PC Sampling + * Value is a uint32_t + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL = 6, + /** + * [w] Value for output data format + * Default - CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED + * Input value should be of type \ref CUpti_PCSamplingOutputDataFormat. + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT = 7, + /** + * [w] Data buffer to hold collected PC Sampling data PARSED_DATA + * Default - none. + * Buffer type is void * which can point to PARSED_DATA + * Refer \ref CUpti_PCSamplingData for the buffer format for PARSED_DATA + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER = 8, + /** + * [rw] Control sleep time of the worker threads created by CUPTI for various PC sampling operations. + * CUPTI creates multiple worker threads to offload certain operations to these threads. This includes decoding of HW data to + * the CUPTI PC sampling data and correlating PC data to SASS instructions. CUPTI wakes up these threads periodically. + * Default - 100 milliseconds.
+ * Value is a uint32_t + */ + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN = 9, + CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_FORCE_INT = 0x7fffffff, +} CUpti_PCSamplingConfigurationAttributeType; + +/** + * \brief PC sampling configuration information structure + * + * This structure provides \ref CUpti_PCSamplingConfigurationAttributeType which can be configured + * or queried for PC sampling configuration + */ +typedef struct +{ + /** + * Refer \ref CUpti_PCSamplingConfigurationAttributeType for all supported attribute types + */ + CUpti_PCSamplingConfigurationAttributeType attributeType; + /* + * Configure or query status for \p attributeType + * CUPTI_SUCCESS for valid \p attributeType and \p attributeData + * CUPTI_ERROR_INVALID_OPERATION if \p attributeData is not valid + * CUPTI_ERROR_INVALID_PARAMETER if \p attributeType is not valid + */ + CUptiResult attributeStatus; + union + { + /** + * Invalid Value + */ + struct + { + uint64_t data[3]; + } invalidData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD + */ + struct + { + uint32_t samplingPeriod; + } samplingPeriodData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON + */ + struct + { + size_t stallReasonCount; + uint32_t *pStallReasonIndex; + } stallReasonData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE + */ + struct + { + size_t scratchBufferSize; + } scratchBufferSizeData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE + */ + struct + { + size_t hardwareBufferSize; + } hardwareBufferSizeData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE + */ + struct + { + CUpti_PCSamplingCollectionMode collectionMode; + } collectionModeData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL + */ + struct + { + uint32_t enableStartStopControl; + } enableStartStopControlData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT + */ + struct + { + CUpti_PCSamplingOutputDataFormat outputDataFormat; + } outputDataFormatData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER + */ + struct + { + void *samplingDataBuffer; + } samplingDataBufferData; + /** + * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN + */ + struct + { + uint32_t workerThreadPeriodicSleepSpan; + } workerThreadPeriodicSleepSpanData; + + } attributeData; +} CUpti_PCSamplingConfigurationInfo; + +/** + * \brief PC sampling configuration structure + * + * This structure configures PC sampling using \ref cuptiPCSamplingSetConfigurationAttribute + * and queries PC sampling default configuration using \ref cuptiPCSamplingGetConfigurationAttribute + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingConfigurationInfoParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. 
+ */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; + /** + * [w] Number of attributes to configure using \ref cuptiPCSamplingSetConfigurationAttribute or query + * using \ref cuptiPCSamplingGetConfigurationAttribute + */ + size_t numAttributes; + /** + * Refer \ref CUpti_PCSamplingConfigurationInfo + */ + CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo; +} CUpti_PCSamplingConfigurationInfoParams; +#define CUpti_PCSamplingConfigurationInfoParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingConfigurationInfoParams,pPCSamplingConfigurationInfo) + +/** + * \brief Write PC Sampling configuration attribute. + * + * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams + * containing PC sampling configuration. + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with + * an invalid attribute. + * \retval CUPTI_ERROR_INVALID_PARAMETER if an attribute value is not valid + * or \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingSetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams); + +/** + * \brief Read PC Sampling configuration attribute. + * + * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams + * containing PC sampling configuration. + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with + * an invalid attribute. + * \retval CUPTI_ERROR_INVALID_PARAMETER if an attribute is not valid + * or \p pParams is not valid + * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT indicates that + * the \p value buffer is too small to hold the attribute value + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingGetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams); + +/** + * \brief Params for cuptiPCSamplingGetData + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingGetDataParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; + /** + * [w] Data buffer to hold collected PC Sampling data PARSED_DATA + * Buffer type is void * which can point to PARSED_DATA + * Refer \ref CUpti_PCSamplingData for the buffer format for PARSED_DATA + */ + void *pcSamplingData; +} CUpti_PCSamplingGetDataParams; +#define CUpti_PCSamplingGetDataParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetDataParams, pcSamplingData) +/** + * \brief Flush GPU PC sampling data periodically. + * + * Flushing of GPU PC Sampling data is required at the following points to maintain uniqueness of PCs: + * For \ref CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, after every module load-unload-load + * For \ref CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED, after every kernel ends + * If configuration option \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL + * is enabled, then after every range end, i.e.
\ref cuptiPCSamplingStop() + * + * If the application is profiled in \ref CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS with + * \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL disabled, and there is no module unload, + * the user can collect data in two ways: + * Use the \ref cuptiPCSamplingGetData() API periodically + * Use \ref cuptiPCSamplingDisable() on application exit and read GPU PC sampling data from the sampling + * data buffer passed during configuration. + * Note: if the \ref cuptiPCSamplingGetData() API is not called periodically, then the sampling data buffer + * passed during configuration should be large enough to hold the data for all PCs. + * The \ref cuptiPCSamplingGetData() API never does device synchronization. + * It is possible that when the API is called there is some unconsumed data in the HW buffer. In this case + * CUPTI provides only the data available with it at that moment. + * + * \param Refer \ref CUpti_PCSamplingGetDataParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called without + * enabling PC sampling. + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + * \retval CUPTI_ERROR_OUT_OF_MEMORY indicates that the HW buffer is full + */ +CUptiResult CUPTIAPI cuptiPCSamplingGetData(CUpti_PCSamplingGetDataParams *pParams); + +/** + * \brief Params for cuptiPCSamplingEnable + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingEnableParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; +} CUpti_PCSamplingEnableParams; +#define CUpti_PCSamplingEnableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingEnableParams, ctx) + +/** + * \brief Enable PC sampling. + * + * \param Refer \ref CUpti_PCSamplingEnableParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingEnable(CUpti_PCSamplingEnableParams *pParams); + +/** + * \brief Params for cuptiPCSamplingDisable + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; +} CUpti_PCSamplingDisableParams; +#define CUpti_PCSamplingDisableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingDisableParams, ctx) + +/** + * \brief Disable PC sampling. + * + * For applications that don't destroy the CUDA context explicitly, + * this API does the PC Sampling tear-down, joins threads and copies PC records into the buffer provided + * during the PC sampling configuration. PC records which can't be accommodated in the buffer are discarded.
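+ * + * A hedged teardown sketch (illustrative only; assumes PC sampling was previously enabled on ctx and a CUpti_PCSamplingData buffer was supplied via CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER): + * \code + * CUpti_PCSamplingDisableParams disableParams = {0}; + * disableParams.size = CUpti_PCSamplingDisableParamsSize; + * disableParams.pPriv = NULL; + * disableParams.ctx = ctx; + * cuptiPCSamplingDisable(&disableParams); + * // Any remaining records now sit in the sampling data buffer + * // passed during configuration. + * \endcode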
+ * + * \param Refer \ref CUpti_PCSamplingDisableParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingDisable(CUpti_PCSamplingDisableParams *pParams); + +/** + * \brief Params for cuptiPCSamplingStart + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingStartParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; +} CUpti_PCSamplingStartParams; +#define CUpti_PCSamplingStartParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStartParams, ctx) + +/** + * \brief Start PC sampling. + * + * The user can collect PC Sampling data for a user-defined range specified by the Start/Stop APIs. + * This API can be used to mark the start of the range. Set configuration option + * \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API. + * + * \param Refer \ref CUpti_PCSamplingStartParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with + * incorrect PC Sampling configuration. + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingStart(CUpti_PCSamplingStartParams *pParams); + +/** + * \brief Params for cuptiPCSamplingStop + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingStopParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; +} CUpti_PCSamplingStopParams; +#define CUpti_PCSamplingStopParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStopParams, ctx) + +/** + * \brief Stop PC sampling. + * + * The user can collect PC Sampling data for a user-defined range specified by the Start/Stop APIs. + * This API can be used to mark the end of the range. Set configuration option + * \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API. + * + * \param Refer \ref CUpti_PCSamplingStopParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with + * incorrect PC Sampling configuration. + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingStop(CUpti_PCSamplingStopParams *pParams); + +/** + * \brief Params for cuptiPCSamplingGetNumStallReasons + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingGetNumStallReasonsParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility.
+ */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; + /** + * [r] Number of stall reasons + */ + size_t *numStallReasons; +} CUpti_PCSamplingGetNumStallReasonsParams; +#define CUpti_PCSamplingGetNumStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetNumStallReasonsParams, numStallReasons) + +/** + * \brief Get PC sampling stall reason count. + * + * \param Refer \ref CUpti_PCSamplingGetNumStallReasonsParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingGetNumStallReasons(CUpti_PCSamplingGetNumStallReasonsParams *pParams); + +/** + * \brief Params for cuptiPCSamplingGetStallReasons + */ +typedef struct +{ + /** + * [w] Size of the data structure i.e. CUpti_PCSamplingGetStallReasonsParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Assign to NULL + */ + void* pPriv; + /** + * [w] CUcontext + */ + CUcontext ctx; + /** + * [w] Number of stall reasons + */ + size_t numStallReasons; + /** + * [r] Stall reason index + */ + uint32_t *stallReasonIndex; + /** + * [r] Stall reasons name + */ + char **stallReasons; +} CUpti_PCSamplingGetStallReasonsParams; +#define CUpti_PCSamplingGetStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetStallReasonsParams, stallReasons) + +/** + * \brief Get PC sampling stall reasons. + * + * \param Refer \ref CUpti_PCSamplingGetStallReasonsParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device + * does not support the API + */ +CUptiResult CUPTIAPI cuptiPCSamplingGetStallReasons(CUpti_PCSamplingGetStallReasonsParams *pParams); + +/** + * \brief Params for cuptiGetSassToSourceCorrelation + */ +typedef struct { + /** + * [w] Size of the data structure i.e. CUpti_GetSassToSourceCorrelationParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Pointer to cubin binary where function belongs. + */ + const void* cubin; + /** + * [w] Function name to which PC belongs. + */ + const char *functionName; + /** + * [w] Size of cubin binary. + */ + size_t cubinSize; + /** + * [r] Line number in the source code. + */ + uint32_t lineNumber; + /** + * [w] PC offset + */ + uint64_t pcOffset; + /** + * [r] Path for the source file. + */ + char *fileName; + /** + * [r] Path for the directory of source file. + */ + char *dirName; +} CUpti_GetSassToSourceCorrelationParams; +#define CUpti_GetSassToSourceCorrelationParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetSassToSourceCorrelationParams, dirName) + +/** + * \brief SASS to Source correlation. + * + * \param Refer \ref CUpti_GetSassToSourceCorrelationParams + * + * It is expected from user to free allocated memory for fileName and dirName after use. + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if either of the parameters cubin or functionName + * is NULL or cubinSize is zero or size field is not set correctly. 
+ * \retval CUPTI_ERROR_INVALID_MODULE if the provided cubin is invalid. + * \retval CUPTI_ERROR_UNKNOWN an internal error occurred. + * This error code is also used for cases when the function is not present in the module. + * A better error code will be returned in a future release. + */ +CUptiResult CUPTIAPI cuptiGetSassToSourceCorrelation(CUpti_GetSassToSourceCorrelationParams *pParams); + +/** + * \brief Params for cuptiGetCubinCrc + */ +typedef struct { + /** + * [w] Size of configuration structure. + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * [w] Size of cubin binary. + */ + size_t cubinSize; + /** + * [w] Pointer to cubin binary + */ + const void* cubin; + /** + * [r] Computed CRC will be stored in it. + */ + uint64_t cubinCrc; +} CUpti_GetCubinCrcParams; +#define CUpti_GetCubinCrcParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetCubinCrcParams, cubinCrc) + +/** + * \brief Get the CRC of a cubin. + * + * This function returns the CRC of the provided cubin binary. + * + * \param Refer \ref CUpti_GetCubinCrcParams + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if the parameter cubin is NULL or + * the provided cubinSize is zero or the size field is not set. + */ +CUptiResult CUPTIAPI cuptiGetCubinCrc(CUpti_GetCubinCrcParams *pParams); + +/** + * \brief Function type for the callback used by CUPTI to request the CRC of a + * loaded module. + * + * This callback function requests the CRC of the module provided to it. + * The provided CRC will be stored in PC sampling records, i.e. in the field 'cubinCrc' of the PC sampling + * struct CUpti_PCSamplingPCData. The CRC is used during offline source correlation to uniquely identify the module. + * + * \param cubin The pointer to the cubin binary + * \param cubinSize The size of the cubin binary. + * \param cubinCrc Returns the computed CRC of the cubin. + */ +typedef void (CUPTIAPI *CUpti_ComputeCrcCallbackFunc)( + const void* cubin, + size_t cubinSize, + uint64_t *cubinCrc); + +/** + * \brief Register a callback function with CUPTI to use + * your own algorithm to compute the cubin CRC. + * + * This function registers a callback function that CUPTI calls + * when a CUDA module is loaded. + * + * \param funcComputeCubinCrc The callback to invoke when a CUDA module + * is loaded. + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p funcComputeCubinCrc is NULL.
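+ * + * A minimal registration sketch (illustrative only; MyCrc64 is a hypothetical user-supplied hash routine, not part of this header): + * \code + * static void CUPTIAPI ComputeCrc(const void* cubin, size_t cubinSize, + *                                 uint64_t* cubinCrc) + * { + *     // Any CRC/hash implementation can stand in here. + *     *cubinCrc = MyCrc64((const unsigned char*)cubin, cubinSize); + * } + * + * CUptiResult res = cuptiRegisterComputeCrcCallback(ComputeCrc); + * \endcode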
+ */ +CUptiResult CUPTIAPI cuptiRegisterComputeCrcCallback(CUpti_ComputeCrcCallbackFunc funcComputeCubinCrc); + +/** @} */ /* END CUPTI_PCSAMPLING_API */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_PCSAMPLING_H_*/ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h new file mode 100644 index 0000000000000000000000000000000000000000..9cb1ac2132b3d53bd67f39f1e4ebd85d3ea61465 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h @@ -0,0 +1,419 @@ +#if !defined(_CUPTI_PCSAMPLING_UTIL_H_) +#define _CUPTI_PCSAMPLING_UTIL_H_ + +#include <fstream> +#include <cupti_pcsampling.h> + +#ifndef CUPTIUTILAPI +#ifdef _WIN32 +#define CUPTIUTILAPI __stdcall +#else +#define CUPTIUTILAPI +#endif +#endif + +#define ACTIVITY_RECORD_ALIGNMENT 8 +#if defined(_WIN32) // Windows 32- and 64-bit +#define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding +#define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT)) +#define END_PACKED_ALIGNMENT __pragma(pack(pop)) +#elif defined(__GNUC__) // GCC +#define START_PACKED_ALIGNMENT +#define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT))) +#define END_PACKED_ALIGNMENT +#else // all other compilers +#define START_PACKED_ALIGNMENT +#define PACKED_ALIGNMENT +#define END_PACKED_ALIGNMENT +#endif + +#ifndef CUPTI_UTIL_STRUCT_SIZE +#define CUPTI_UTIL_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_)) +#endif + +#ifndef CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS +#define CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS(type, member, structSize) \ + (offsetof(type, member) < structSize) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) + #pragma GCC visibility push(default) +#endif + +namespace CUPTI { namespace PcSamplingUtil { + +/** + * \defgroup CUPTI_PCSAMPLING_UTILITY CUPTI PC Sampling Utility API + * Functions, types, and enums that implement the CUPTI PC Sampling Utility API. + * @{ + */ + +/** + * \brief Header info will be stored in file. + */ +typedef struct PACKED_ALIGNMENT { + /** + * Version of file format. + */ + uint32_t version; + /** + * Total number of buffers present in the file. + */ + uint32_t totalBuffers; +} Header; + +/** + * \brief BufferInfo will be stored in the file for every buffer, + * i.e. for every call of the UtilDumpPcSamplingBufferInFile() API. + */ +typedef struct PACKED_ALIGNMENT { + /** + * Total number of PC records. + */ + uint64_t recordCount; + /** + * Count of all stall reasons supported on the GPU. + */ + size_t numStallReasons; + /** + * Total number of stall reasons in a single record. + */ + uint64_t numSelectedStallReasons; + /** + * Buffer size in Bytes. + */ + uint64_t bufferByteSize; +} BufferInfo; + +/** + * \brief All available stall reasons name and respective indexes + * will be stored in it. + */ +typedef struct PACKED_ALIGNMENT { + /** + * Number of all available stall reasons + */ + size_t numStallReasons; + /** + * Stall reasons names of all available stall reasons + */ + char **stallReasons; + /** + * Stall reason index of all available stall reasons + */ + uint32_t *stallReasonIndex; +} PcSamplingStallReasons; + +typedef enum { + /** + * Invalid buffer type. + */ + PC_SAMPLING_BUFFER_INVALID = 0, + /** + * Refers to CUpti_PCSamplingData buffer.
+ */ + PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA = 1 +} PcSamplingBufferType; + +/** + * \brief CUPTI PC sampling utility API result codes. + * + * Error and result codes returned by the CUPTI PC sampling utility API. + */ +typedef enum { + /** + * No error + */ + CUPTI_UTIL_SUCCESS = 0, + /** + * One or more of the parameters are invalid. + */ + CUPTI_UTIL_ERROR_INVALID_PARAMETER = 1, + /** + * Unable to create a new file + */ + CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE = 2, + /** + * Unable to open a file + */ + CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE = 3, + /** + * Read or write operation failed + */ + CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED = 4, + /** + * Provided file handle is corrupted. + */ + CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED = 5, + /** + * Seek operation failed. + */ + CUPTI_UTIL_ERROR_SEEK_OPERATION_FAILED = 6, + /** + * Unable to allocate enough memory to perform the requested + * operation. + */ + CUPTI_UTIL_ERROR_OUT_OF_MEMORY = 7, + /** + * An unknown internal error has occurred. + */ + CUPTI_UTIL_ERROR_UNKNOWN = 999, + CUPTI_UTIL_ERROR_FORCE_INT = 0x7fffffff +} CUptiUtilResult; + +/** + * \brief Params for \ref CuptiUtilPutPcSampData + */ +typedef struct { + /** + * Size of the data structure i.e. CUptiUtil_PutPcSampDataParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * Type of buffer to store in the file + */ + PcSamplingBufferType bufferType; + /** + * PC sampling buffer. + */ + void *pSamplingData; + /** + * Number of configured attributes + */ + size_t numAttributes; + /** + * Refer \ref CUpti_PCSamplingConfigurationInfo + * It is expected to provide configuration details of at least the + * CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON attribute. + */ + CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo; + /** + * Refer \ref PcSamplingStallReasons. + */ + PcSamplingStallReasons *pPcSamplingStallReasons; + /** + * File name to store the buffer into. + */ + const char* fileName; +} CUptiUtil_PutPcSampDataParams; +#define CUptiUtil_PutPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_PutPcSampDataParams, fileName) + +/** + * \brief Dump PC sampling data into the file. + * + * This API can be called multiple times; + * each call appends the buffer to the file. + * For every buffer it stores a BufferInfo record, + * so that before retrieving data it is possible to allocate a buffer + * of the right size to store the retrieved data. + * This API creates the file if it does not exist. + * If the stallReasonIndex or stallReasons pointer of \ref CUptiUtil_PutPcSampDataParams is NULL, + * then stall reason data will not be stored in the file. + * It is expected to store all available stall reason data at least once, to refer to it during + * offline correlation. + * + * \retval CUPTI_UTIL_SUCCESS + * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if the buffer type is invalid, + * or if either the pSamplingData or pParams pointer is NULL, or if the stall reason configuration details are not provided, + * or if the filename is empty. + * \retval CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE + * \retval CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE + * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED + */ +CUptiUtilResult CUPTIUTILAPI CuptiUtilPutPcSampData(CUptiUtil_PutPcSampDataParams *pParams); + +/** + * \brief Params for \ref CuptiUtilGetHeaderData + */ +typedef struct { + /** + * Size of the data structure i.e.
CUptiUtil_GetHeaderDataParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * File handle. + */ + std::ifstream *fileHandler; + /** + * Header Info. + */ + Header headerInfo; + +} CUptiUtil_GetHeaderDataParams; +#define CUptiUtil_GetHeaderDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetHeaderDataParams, headerInfo) + +/** + * \brief Get header data of file. + * + * This API must be called once, at the start of retrieving data from the file. + * The \ref Header structure gives the total number + * of buffers present in the file. + * + * \retval CUPTI_UTIL_SUCCESS + * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or the param struct size is incorrect. + * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in a good state to read data from the file + * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from the file. + */ +CUptiUtilResult CUPTIUTILAPI CuptiUtilGetHeaderData(CUptiUtil_GetHeaderDataParams *pParams); + +/** + * \brief Params for \ref CuptiUtilGetBufferInfo + */ +typedef struct { + /** + * Size of the data structure i.e. CUptiUtil_GetBufferInfoParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * File handle. + */ + std::ifstream *fileHandler; + /** + * Buffer Info. + */ + BufferInfo bufferInfoData; +} CUptiUtil_GetBufferInfoParams; +#define CUptiUtil_GetBufferInfoParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetBufferInfoParams, bufferInfoData) + +/** + * \brief Get buffer info data of file. + * + * This API must be called every time before calling the CuptiUtilGetPcSampData API. + * The \ref BufferInfo structure gives the recordCount and stallReasonCount + * of every record in the buffer, which helps to allocate a buffer of the exact size to retrieve the data into. + * + * \retval CUPTI_UTIL_SUCCESS + * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or the param struct size is incorrect. + * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in a good state to read data from the file. + * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from the file. + */ +CUptiUtilResult CUPTIUTILAPI CuptiUtilGetBufferInfo(CUptiUtil_GetBufferInfoParams *pParams); + +/** + * \brief Params for \ref CuptiUtilGetPcSampData + */ +typedef struct { + /** + * Size of the data structure i.e. CUptiUtil_GetPcSampDataParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * File handle. + */ + std::ifstream *fileHandler; + /** + * Type of buffer stored in the file + */ + PcSamplingBufferType bufferType; + /** + * Pointer to collected buffer info using \ref CuptiUtilGetBufferInfo + */ + BufferInfo *pBufferInfoData; + /** + * Pointer to allocated memory to store retrieved data from file. + */ + void *pSamplingData; + /** + * Number of configuration attributes + */ + size_t numAttributes; + /** + * Refer \ref CUpti_PCSamplingConfigurationInfo + */ + CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo; + /** + * Refer \ref PcSamplingStallReasons.
+ * For the stallReasons field of \ref PcSamplingStallReasons it is expected to + * allocate memory for each string element of the array. + */ + PcSamplingStallReasons *pPcSamplingStallReasons; +} CUptiUtil_GetPcSampDataParams; +#define CUptiUtil_GetPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetPcSampDataParams, pPcSamplingStallReasons) + +/** + * \brief Retrieve PC sampling data from the file into an allocated buffer. + * + * This API must be called after the CuptiUtilGetBufferInfo API. + * It retrieves data from the file into the allocated buffer. + * + * \retval CUPTI_UTIL_SUCCESS + * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if the buffer type is invalid, + * or if either of pSamplingData or pParams is NULL, or if the filename is empty. If pPcSamplingStallReasons is not NULL, then + * error out if any of stallReasonIndex, stallReasons, or a stallReasons array element pointer is NULL. + * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED + * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in a good state to read data from the file. + */ +CUptiUtilResult CUPTIUTILAPI CuptiUtilGetPcSampData(CUptiUtil_GetPcSampDataParams *pParams); + +/** + * \brief Params for \ref CuptiUtilMergePcSampData + */ +typedef struct +{ + /** + * Size of the data structure i.e. CUptiUtil_MergePcSampDataParamsSize + * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are + * available in the structure. Used to preserve backward compatibility. + */ + size_t size; + /** + * Number of buffers to merge. + */ + size_t numberOfBuffers; + /** + * Pointer to the array of buffers to merge + */ + CUpti_PCSamplingData *PcSampDataBuffer; + /** + * Pointer to the array of merged buffers as per the range id. + */ + CUpti_PCSamplingData **MergedPcSampDataBuffers; + /** + * Number of merged buffers. + */ + size_t *numMergedBuffer; +} CUptiUtil_MergePcSampDataParams; +#define CUptiUtil_MergePcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_MergePcSampDataParams, numMergedBuffer) + +/** + * \brief Merge PC sampling data by range id. + * + * This API merges PC sampling data by range id. + * It allocates memory for the merged data, fills the data in it, + * and provides the buffer pointer in the MergedPcSampDataBuffers field. + * The user is expected to free the merged data buffers after use. + * + * \retval CUPTI_UTIL_SUCCESS + * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if the param struct size is invalid, + * or the count of buffers to merge is invalid, i.e. less than 1, + * or any of PcSampDataBuffer, MergedPcSampDataBuffers, numMergedBuffer is NULL + * \retval CUPTI_UTIL_ERROR_OUT_OF_MEMORY Unable to allocate memory for the merged buffer. + */ +CUptiUtilResult CUPTIUTILAPI CuptiUtilMergePcSampData(CUptiUtil_MergePcSampDataParams *pParams); + +/** @} */ /* END CUPTI_PCSAMPLING_UTILITY */ + +} } + +#if defined(__GNUC__) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h new file mode 100644 index 0000000000000000000000000000000000000000..0682253af7aa5129250359feb7e29fee0ba3c414 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h @@ -0,0 +1,589 @@ +/* + * Copyright 2011-2020 NVIDIA Corporation. All rights reserved.
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(_CUPTI_PROFILER_TARGET_H_) +#define _CUPTI_PROFILER_TARGET_H_ + +#include <cuda.h> +#include <cupti_result.h> +#include <stddef.h> +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \defgroup CUPTI_PROFILER_API CUPTI Profiling API + * Functions, types, and enums that implement the CUPTI Profiling API. + * @{ + */ +#ifndef CUPTI_PROFILER_STRUCT_SIZE +#define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_)) +#endif + +/** + * \brief Profiler range attribute + * + * A metric enabled in the session's configuration is collected separately per unique range-stack in the pass. + * This is an attribute to collect metrics around each kernel in a profiling session or in a user-defined range.
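+ * + * As a hedged illustration, a client that wants metrics around every kernel would later select + * \code + * CUpti_ProfilerRange range = CUPTI_AutoRange; // auto-defined ranges per kernel + * \endcode + * in the session parameters, while CUPTI_UserRange instead delimits explicitly user-defined ranges.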
+ */ +typedef enum +{ + /** + * Invalid value + */ + CUPTI_Range_INVALID, + /** + * Ranges are auto defined around each kernel in a profiling session + */ + CUPTI_AutoRange, + /** + * A range in which metric data to be collected is defined by the user + */ + CUPTI_UserRange, + /** + * Range count + */ + CUPTI_Range_COUNT, +} CUpti_ProfilerRange; + +/** + * \brief Profiler replay attribute + * + * For metrics which require multipass collection, a replay of the GPU kernel(s) is required. + * This is an attribute which specify how the replay of the kernel(s) to be measured is done. + */ +typedef enum +{ + /** + * Invalid Value + */ + CUPTI_Replay_INVALID, + /** + * Replay is done by CUPTI user around the process + */ + CUPTI_ApplicationReplay, + /** + * Replay is done around kernel implicitly by CUPTI + */ + CUPTI_KernelReplay, + /** + * Replay is done by CUPTI user within a process + */ + CUPTI_UserReplay, + /** + * Replay count + */ + CUPTI_Replay_COUNT, +} CUpti_ProfilerReplayMode; + +/** + * \brief Default parameter for cuptiProfilerInitialize + */ +typedef struct CUpti_Profiler_Initialize_Params +{ + size_t structSize; //!< [in] CUpti_Profiler_Initialize_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + +} CUpti_Profiler_Initialize_Params; +#define CUpti_Profiler_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Initialize_Params, pPriv) + +/** + * \brief Default parameter for cuptiProfilerDeInitialize + */ +typedef struct CUpti_Profiler_DeInitialize_Params +{ + size_t structSize; //!< [in] CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + +} CUpti_Profiler_DeInitialize_Params; +#define CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeInitialize_Params, pPriv) + +/** + * \brief Initializes the profiler interface + * + * Loads the required libraries in the process address space. + * Sets up the hooks with the CUDA driver. + */ +CUptiResult CUPTIAPI cuptiProfilerInitialize(CUpti_Profiler_Initialize_Params *pParams); + +/** + * \brief DeInitializes the profiler interface + */ +CUptiResult CUPTIAPI cuptiProfilerDeInitialize(CUpti_Profiler_DeInitialize_Params *pParams); + +/** + * \brief Input parameter to define the counterDataImage + */ +typedef struct CUpti_Profiler_CounterDataImageOptions +{ + size_t structSize; //!< [in] CUpti_Profiler_CounterDataImageOptions_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + + const uint8_t* pCounterDataPrefix; /**< [in] Address of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix(). + Must be align(8).*/ + size_t counterDataPrefixSize; //!< [in] Size of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix(). 
+ uint32_t maxNumRanges; //!< [in] Maximum number of ranges that can be profiled + uint32_t maxNumRangeTreeNodes; //!< [in] Maximum number of RangeTree nodes; must be >= maxNumRanges + uint32_t maxRangeNameLength; //!< [in] Maximum string length of each RangeName, including the trailing NULL character +} CUpti_Profiler_CounterDataImageOptions; +#define CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImageOptions, maxRangeNameLength) + +/** + * \brief Params for cuptiProfilerCounterDataImageCalculateSize + */ +typedef struct CUpti_Profiler_CounterDataImage_CalculateSize_Params +{ + size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + + size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE + const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options + size_t counterDataImageSize; //!< [out] +} CUpti_Profiler_CounterDataImage_CalculateSize_Params; +#define CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateSize_Params, counterDataImageSize) + +/** + * \brief Params for cuptiProfilerCounterDataImageInitialize + */ +typedef struct CUpti_Profiler_CounterDataImage_Initialize_Params +{ + size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + + size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE + const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options + size_t counterDataImageSize; //!< [in] Size calculated from cuptiProfilerCounterDataImageCalculateSize + uint8_t* pCounterDataImage; //!< [in] The buffer to be initialized. +} CUpti_Profiler_CounterDataImage_Initialize_Params; +#define CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_Initialize_Params, pCounterDataImage) + +/** + * \brief A CounterData image allocates space for values for each counter for each range. + * + * The user bears the responsibility of managing the counterDataImage allocations. + * CounterDataPrefix contains metadata about the metrics that will be stored in counterDataImage. + * Use these APIs to calculate the allocation size and initialize the counterData image.
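+ * + * A hedged two-step sketch (illustrative only; options is assumed to be a fully filled-in CUpti_Profiler_CounterDataImageOptions, and <stdlib.h> to be available for malloc): + * \code + * CUpti_Profiler_CounterDataImage_CalculateSize_Params calcParams = {0}; + * calcParams.structSize = CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE; + * calcParams.sizeofCounterDataImageOptions = CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE; + * calcParams.pOptions = &options; + * cuptiProfilerCounterDataImageCalculateSize(&calcParams); // fills counterDataImageSize + * + * uint8_t* pImage = (uint8_t*)malloc(calcParams.counterDataImageSize); + * + * CUpti_Profiler_CounterDataImage_Initialize_Params initParams = {0}; + * initParams.structSize = CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE; + * initParams.sizeofCounterDataImageOptions = CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE; + * initParams.pOptions = &options; + * initParams.counterDataImageSize = calcParams.counterDataImageSize; + * initParams.pCounterDataImage = pImage; + * cuptiProfilerCounterDataImageInitialize(&initParams); + * \endcode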
+ */ +CUptiResult cuptiProfilerCounterDataImageCalculateSize(CUpti_Profiler_CounterDataImage_CalculateSize_Params* pParams); +CUptiResult cuptiProfilerCounterDataImageInitialize(CUpti_Profiler_CounterDataImage_Initialize_Params* pParams); + +/** + * \brief Params for cuptiProfilerCounterDataImageCalculateScratchBufferSize + */ +typedef struct CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params +{ + size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + + size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize + uint8_t* pCounterDataImage; //!< [in] + size_t counterDataScratchBufferSize; //!< [out] +} CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params; +#define CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params, counterDataScratchBufferSize) + +/** + * \brief Params for cuptiProfilerCounterDataImageInitializeScratchBuffer + */ +typedef struct CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params +{ + size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE + void* pPriv; //!< [in] assign to NULL + + size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize + uint8_t* pCounterDataImage; //!< [in] + size_t counterDataScratchBufferSize; //!< [in] size calculated using cuptiProfilerCounterDataImageCalculateScratchBufferSize + uint8_t* pCounterDataScratchBuffer; //!< [in] the scratch buffer to be initialized. +} CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params; +#define CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params, pCounterDataScratchBuffer) + +/** + * \brief A temporary storage for CounterData image needed for internal operations + * + * Use these APIs to calculate the allocation size and initialize counterData image scratch buffer. 
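+ * + * A hedged sketch mirroring the pattern above (illustrative only; assumes pImage and its counterDataImageSize come from the previous initialization step): + * \code + * CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params scratchCalc = {0}; + * scratchCalc.structSize = CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE; + * scratchCalc.counterDataImageSize = counterDataImageSize; + * scratchCalc.pCounterDataImage = pImage; + * cuptiProfilerCounterDataImageCalculateScratchBufferSize(&scratchCalc); + * + * uint8_t* pScratch = (uint8_t*)malloc(scratchCalc.counterDataScratchBufferSize); + * + * CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params scratchInit = {0}; + * scratchInit.structSize = CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE; + * scratchInit.counterDataImageSize = counterDataImageSize; + * scratchInit.pCounterDataImage = pImage; + * scratchInit.counterDataScratchBufferSize = scratchCalc.counterDataScratchBufferSize; + * scratchInit.pCounterDataScratchBuffer = pScratch; + * cuptiProfilerCounterDataImageInitializeScratchBuffer(&scratchInit); + * \endcode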
+ */
+CUptiResult cuptiProfilerCounterDataImageCalculateScratchBufferSize(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params* pParams);
+CUptiResult cuptiProfilerCounterDataImageInitializeScratchBuffer(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params* pParams);
+
+/**
+ * \brief Params for cuptiProfilerBeginSession
+ */
+typedef struct CUpti_Profiler_BeginSession_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_BeginSession_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+    size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
+    uint8_t* pCounterDataImage;  //!< [in] address of CounterDataImage
+    size_t counterDataScratchBufferSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateScratchBufferSize
+    uint8_t* pCounterDataScratchBuffer;  //!< [in] address of CounterDataImage scratch buffer
+    uint8_t bDumpCounterDataInFile;      //!< [in] [optional]
+    const char* pCounterDataFilePath;    //!< [in] [optional]
+    CUpti_ProfilerRange range;           //!< [in] CUpti_ProfilerRange
+    CUpti_ProfilerReplayMode replayMode; //!< [in] CUpti_ProfilerReplayMode
+    /* Replay options, required when replay is done by the CUPTI user */
+    size_t maxRangesPerPass;   //!< [in] Maximum number of ranges that can be recorded in a single pass.
+    size_t maxLaunchesPerPass; //!< [in] Maximum number of kernel launches that can be recorded in a single pass; must be >= maxRangesPerPass.
+
+} CUpti_Profiler_BeginSession_Params;
+#define CUpti_Profiler_BeginSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginSession_Params, maxLaunchesPerPass)
+/**
+ * \brief Params for cuptiProfilerEndSession
+ */
+typedef struct CUpti_Profiler_EndSession_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_EndSession_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+} CUpti_Profiler_EndSession_Params;
+#define CUpti_Profiler_EndSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndSession_Params, ctx)
+
+/**
+ * \brief Begins a profiling session and sets up profiling on the device
+ *
+ * It does not itself start profiling, but the GPU resources needed for profiling are allocated.
+ * Outside of a session, the GPU will return to its normal operating state.
+ */
+CUptiResult CUPTIAPI cuptiProfilerBeginSession(CUpti_Profiler_BeginSession_Params* pParams);
+/**
+ * \brief Ends a profiling session
+ *
+ * Frees up the GPU resources acquired for profiling.
+ * Outside of a session, the GPU will return to its normal operating state.
+ */
+CUptiResult CUPTIAPI cuptiProfilerEndSession(CUpti_Profiler_EndSession_Params* pParams);
+
+/**
+ * \brief Params for cuptiProfilerSetConfig
+ */
+typedef struct CUpti_Profiler_SetConfig_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_SetConfig_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;          //!< [in] if NULL, the current CUcontext is used
+    const uint8_t* pConfig; //!< [in] Config created by NVPW_RawMetricsConfig_GetConfigImage(). Must be align(8).
+    size_t configSize;          //!< [in] size of config
+    uint16_t minNestingLevel;   //!< [in] the lowest nesting level to be profiled; must be >= 1
+    uint16_t numNestingLevels;  //!< [in] the number of nesting levels to profile; must be >= 1
+    size_t passIndex;           //!< [in] Set this to zero for in-app replay; set this to the output of EndPass() for application replay
+    uint16_t targetNestingLevel; //!< [in] Set this to minNestingLevel for in-app replay; set this to the output of EndPass() for application replay
+} CUpti_Profiler_SetConfig_Params;
+
+#define CUpti_Profiler_SetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_SetConfig_Params, targetNestingLevel)
+
+/**
+ * \brief Params for cuptiProfilerUnsetConfig
+ */
+typedef struct CUpti_Profiler_UnsetConfig_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+} CUpti_Profiler_UnsetConfig_Params;
+#define CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_UnsetConfig_Params, ctx)
+
+/**
+ * \brief Sets the metrics configuration to be profiled
+ *
+ * Use this API to set the config to be profiled in a session. It can be used for advanced cases, such as
+ * collecting multiple configurations into a single CounterData image as needed, without restarting the session.
+ */
+CUptiResult CUPTIAPI cuptiProfilerSetConfig(CUpti_Profiler_SetConfig_Params* pParams);
+/**
+ * \brief Unsets the metrics configuration being profiled
+ *
+ */
+CUptiResult CUPTIAPI cuptiProfilerUnsetConfig(CUpti_Profiler_UnsetConfig_Params* pParams);
+
+/**
+ * \brief Params for cuptiProfilerBeginPass
+ */
+typedef struct CUpti_Profiler_BeginPass_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_BeginPass_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+} CUpti_Profiler_BeginPass_Params;
+#define CUpti_Profiler_BeginPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginPass_Params, ctx)
+
+/**
+ * \brief Params for cuptiProfilerEndPass
+ */
+typedef struct CUpti_Profiler_EndPass_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_EndPass_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;               //!< [in] if NULL, the current CUcontext is used
+    uint16_t targetNestingLevel; //!< [out] The targetNestingLevel that will be collected by the *next* BeginPass.
+    size_t passIndex;            //!< [out] The passIndex that will be collected by the *next* BeginPass
+    uint8_t allPassesSubmitted;  //!< [out] becomes true when the last pass has been queued to the GPU
+} CUpti_Profiler_EndPass_Params;
+#define CUpti_Profiler_EndPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndPass_Params, allPassesSubmitted)
+
+/**
+ * \brief Replay API: used for multi-pass collection.
+ *
+ * These APIs are used if the user chooses to perform the replay themselves (\ref CUPTI_UserReplay or
+ * \ref CUPTI_ApplicationReplay) for multi-pass collection of the metrics configurations.
+ * It's a no-op in case of \ref CUPTI_KernelReplay.
+ */
+CUptiResult cuptiProfilerBeginPass(CUpti_Profiler_BeginPass_Params* pParams);
+
+/**
+ * \brief Replay API: used for multi-pass collection.
+ *
+ * These APIs are used if the user chooses to perform the replay themselves (\ref CUPTI_UserReplay or
+ * \ref CUPTI_ApplicationReplay) for multi-pass collection of the metrics configurations.
+ * It's a no-op in case of \ref CUPTI_KernelReplay.
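+ *
+ * A minimal user-replay loop sketch (illustrative; the other param structs are assumed
+ * to be zero-initialized with their respective _STRUCT_SIZE values, and launchWorkload()
+ * is a hypothetical stand-in for the kernels being profiled):
+ * \code
+ *   CUpti_Profiler_EndPass_Params endPass = { CUpti_Profiler_EndPass_Params_STRUCT_SIZE };
+ *   do {
+ *     cuptiProfilerBeginPass(&beginPass);
+ *     cuptiProfilerEnableProfiling(&enable);    // CUPTI_AutoRange mode
+ *     launchWorkload();
+ *     cuptiProfilerDisableProfiling(&disable);
+ *     cuptiProfilerEndPass(&endPass);
+ *   } while (!endPass.allPassesSubmitted);
+ *   cuptiProfilerFlushCounterData(&flush);      // decode all submitted passes
+ * \endcode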
+ * Returns information for the next pass.
+ */
+CUptiResult cuptiProfilerEndPass(CUpti_Profiler_EndPass_Params* pParams);
+
+/**
+ * \brief Params for cuptiProfilerEnableProfiling
+ */
+typedef struct CUpti_Profiler_EnableProfiling_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+} CUpti_Profiler_EnableProfiling_Params;
+#define CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EnableProfiling_Params, ctx)
+
+/**
+ * \brief Params for cuptiProfilerDisableProfiling
+ */
+typedef struct CUpti_Profiler_DisableProfiling_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+} CUpti_Profiler_DisableProfiling_Params;
+#define CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DisableProfiling_Params, ctx)
+
+/**
+ * \brief Enables profiling
+ *
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
+ * a profiling session.
+ */
+CUptiResult CUPTIAPI cuptiProfilerEnableProfiling(CUpti_Profiler_EnableProfiling_Params* pParams);
+
+/**
+ * \brief Disables profiling
+ *
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
+ * a profiling session.
+ */
+CUptiResult CUPTIAPI cuptiProfilerDisableProfiling(CUpti_Profiler_DisableProfiling_Params* pParams);
+
+/**
+ * \brief Params for cuptiProfilerIsPassCollected
+ */
+typedef struct CUpti_Profiler_IsPassCollected_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;               //!< [in] if NULL, the current CUcontext is used
+    size_t numRangesDropped;     //!< [out] number of ranges whose data was dropped in the processed pass
+    size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
+    uint8_t onePassCollected;    //!< [out] true if a pass was successfully decoded
+    uint8_t allPassesCollected;  //!< [out] becomes true when the last pass has been decoded
+} CUpti_Profiler_IsPassCollected_Params;
+#define CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_IsPassCollected_Params, allPassesCollected)
+
+/**
+ * \brief Asynchronous call to query whether a pass submitted to the GPU has been collected
+ *
+ */
+CUptiResult CUPTIAPI cuptiProfilerIsPassCollected(CUpti_Profiler_IsPassCollected_Params* pParams);
+
+/**
+ * \brief Params for cuptiProfilerFlushCounterData
+ */
+typedef struct CUpti_Profiler_FlushCounterData_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;               //!< [in] if NULL, the current CUcontext is used
+    size_t numRangesDropped;     //!< [out] number of ranges whose data was dropped in the processed passes
+    size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
+} CUpti_Profiler_FlushCounterData_Params;
+#define CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_FlushCounterData_Params, numTraceBytesDropped)
+
+/**
+ * \brief Decode all the submitted passes
+ *
+ * Use this flush API to ensure every pass is decoded into the counterDataImage passed at beginSession.
+ * This causes a CPU/GPU sync in order to collect all the undecoded passes.
+ */
+CUptiResult CUPTIAPI cuptiProfilerFlushCounterData(CUpti_Profiler_FlushCounterData_Params* pParams);
+
+typedef struct CUpti_Profiler_PushRange_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_PushRange_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;          //!< [in] if NULL, the current CUcontext is used
+    const char* pRangeName; //!< [in] specifies the range for subsequent launches; must not be NULL
+    size_t rangeNameLength; //!< [in] assign to strlen(pRangeName) if known; if set to zero, the library will call strlen()
+} CUpti_Profiler_PushRange_Params;
+#define CUpti_Profiler_PushRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PushRange_Params, rangeNameLength)
+
+typedef struct CUpti_Profiler_PopRange_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_PopRange_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+} CUpti_Profiler_PopRange_Params;
+#define CUpti_Profiler_PopRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PopRange_Params, ctx)
+
+
+/**
+ * \brief Range APIs: push a user range
+ *
+ * Counter data is collected per unique range stack, identified by a string label passed by the user.
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
+ */
+CUptiResult CUPTIAPI cuptiProfilerPushRange(CUpti_Profiler_PushRange_Params *pParams);
+
+/**
+ * \brief Range APIs: pop a user range
+ *
+ * Counter data is collected per unique range stack, identified by a string label passed by the user.
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
+ */
+CUptiResult CUPTIAPI cuptiProfilerPopRange(CUpti_Profiler_PopRange_Params *pParams);
+
+/**
+ * \brief Params for cuptiProfilerGetCounterAvailability
+ */
+typedef struct CUpti_Profiler_GetCounterAvailability_Params
+{
+    size_t structSize; //!< [in] CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE
+    void* pPriv;       //!< [in] assign to NULL
+    CUcontext ctx;     //!< [in] if NULL, the current CUcontext is used
+    size_t counterAvailabilityImageSize; //!< [in/out] If `pCounterAvailabilityImage` is NULL, the required size is returned in
+                                         //!< `counterAvailabilityImageSize`; otherwise `counterAvailabilityImageSize` should be set to the size of
+                                         //!< `pCounterAvailabilityImage`, and on return it is overwritten with the number of bytes actually copied
+    uint8_t* pCounterAvailabilityImage;  //!< [in] buffer receiving the counter availability image; may be NULL
+} CUpti_Profiler_GetCounterAvailability_Params;
+#define CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_GetCounterAvailability_Params, pCounterAvailabilityImage)
+
+/**
+ * \brief Query counter availability
+ *
+ * Use this API to query counter availability information into a buffer, which can be used to filter unavailable raw metrics on the host.
+ * Note: this API may fail if any profiling or sampling session is active on the specified context or its device.
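+ *
+ * Typical two-call pattern (illustrative sketch; error handling is omitted):
+ * \code
+ *   CUpti_Profiler_GetCounterAvailability_Params avail =
+ *       { CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE };
+ *   avail.ctx = NULL;                                      // use the current context
+ *   cuptiProfilerGetCounterAvailability(&avail);           // 1st call: query required size
+ *   uint8_t* buf = (uint8_t*)malloc(avail.counterAvailabilityImageSize);
+ *   avail.pCounterAvailabilityImage = buf;
+ *   cuptiProfilerGetCounterAvailability(&avail);           // 2nd call: fill the buffer
+ * \endcode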
+ */
+CUptiResult CUPTIAPI cuptiProfilerGetCounterAvailability(CUpti_Profiler_GetCounterAvailability_Params *pParams);
+
+/// Generic support level enum for CUPTI
+typedef enum
+{
+    CUPTI_PROFILER_CONFIGURATION_UNKNOWN = 0, //!< Configuration support level unknown - either the detection code errored out before setting this value, or the level could not be determined
+    CUPTI_PROFILER_CONFIGURATION_UNSUPPORTED, //!< Profiling is unavailable. For specific feature fields, this means that the current configuration of this feature does not work with profiling. For instance, SLI-enabled devices do not support profiling, and this value would be returned for SLI on an SLI-enabled device.
+    CUPTI_PROFILER_CONFIGURATION_DISABLED,    //!< Profiling would be available for this configuration, but was disabled by the system
+    CUPTI_PROFILER_CONFIGURATION_SUPPORTED    //!< Profiling is supported. For specific feature fields, this means that the current configuration of this feature works with profiling. For instance, SLI-enabled devices do not support profiling, and this value would only be returned for devices which are not SLI-enabled.
+} CUpti_Profiler_Support_Level;
+
+/**
+ * \brief Params for cuptiProfilerDeviceSupported
+ */
+typedef struct
+{
+    size_t structSize; //!< [in] Must be CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE
+    void *pPriv;       //!< [in] assign to NULL
+    CUdevice cuDevice; //!< [in] the CUDA device to query
+
+    CUpti_Profiler_Support_Level isSupported;  //!< [out] overall SUPPORTED / UNSUPPORTED flag representing whether the Profiling and PC Sampling APIs work on the given device and configuration. SUPPORTED if all of the following flags are SUPPORTED, UNSUPPORTED otherwise.
+
+    CUpti_Profiler_Support_Level architecture; //!< [out] SUPPORTED if the device architecture level supports the Profiling API (Compute Capability >= 7.0), UNSUPPORTED otherwise
+    CUpti_Profiler_Support_Level sli;          //!< [out] SUPPORTED if SLI is not enabled, UNSUPPORTED otherwise
+    CUpti_Profiler_Support_Level vGpu;         //!< [out] SUPPORTED if vGPU is supported and profiling is enabled, DISABLED if profiling is supported but not enabled, UNSUPPORTED otherwise
+    CUpti_Profiler_Support_Level confidentialCompute; //!< [out] SUPPORTED if confidential compute is not enabled, UNSUPPORTED otherwise
+    CUpti_Profiler_Support_Level cmp;          //!< [out] SUPPORTED if the device is not an NVIDIA Crypto Mining Processor (CMP), UNSUPPORTED otherwise
+    CUpti_Profiler_Support_Level wsl;          //!< [out] SUPPORTED if WSL is supported, UNSUPPORTED otherwise
+} CUpti_Profiler_DeviceSupported_Params;
+#define CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeviceSupported_Params, confidentialCompute)
+
+/**
+ * \brief Query device compatibility with the Profiling API
+ *
+ * Use this call to determine whether a compute device and configuration are compatible with the Profiling API.
+ * If the configuration does not support profiling, one of several flags will indicate why.
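+ *
+ * A short sketch (illustrative; device ordinal 0 and the result handling are assumptions):
+ * \code
+ *   CUpti_Profiler_DeviceSupported_Params supported =
+ *       { CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE };
+ *   supported.cuDevice = 0;
+ *   if (cuptiProfilerDeviceSupported(&supported) == CUPTI_SUCCESS &&
+ *       supported.isSupported == CUPTI_PROFILER_CONFIGURATION_SUPPORTED) {
+ *     // safe to use the Profiling API on this device
+ *   }
+ * \endcode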
+ */ +CUptiResult CUPTIAPI cuptiProfilerDeviceSupported(CUpti_Profiler_DeviceSupported_Params *pParams); + +/** @} */ /* END CUPTI_METRIC_API */ +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /*_CUPTI_PROFILER_TARGET_H_*/ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_result.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_result.h new file mode 100644 index 0000000000000000000000000000000000000000..f2896451245f9ad325175330c6715b80bf639832 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_result.h @@ -0,0 +1,328 @@ +/* + * Copyright 2010-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */
+
+#if !defined(_CUPTI_RESULT_H_)
+#define _CUPTI_RESULT_H_
+
+#ifndef CUPTIAPI
+#ifdef _WIN32
+#define CUPTIAPI __stdcall
+#else
+#define CUPTIAPI
+#endif
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility push(default)
+#endif
+
+/**
+ * \defgroup CUPTI_RESULT_API CUPTI Result Codes
+ * Error and result codes returned by CUPTI functions.
+ * @{
+ */
+
+/**
+ * \brief CUPTI result codes.
+ *
+ * Error and result codes returned by CUPTI functions.
+ */
+typedef enum {
+    /**
+     * No error.
+     */
+    CUPTI_SUCCESS = 0,
+    /**
+     * One or more of the parameters is invalid.
+     */
+    CUPTI_ERROR_INVALID_PARAMETER = 1,
+    /**
+     * The device does not correspond to a valid CUDA device.
+     */
+    CUPTI_ERROR_INVALID_DEVICE = 2,
+    /**
+     * The context is NULL or not valid.
+     */
+    CUPTI_ERROR_INVALID_CONTEXT = 3,
+    /**
+     * The event domain id is invalid.
+     */
+    CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID = 4,
+    /**
+     * The event id is invalid.
+     */
+    CUPTI_ERROR_INVALID_EVENT_ID = 5,
+    /**
+     * The event name is invalid.
+     */
+    CUPTI_ERROR_INVALID_EVENT_NAME = 6,
+    /**
+     * The current operation cannot be performed due to a dependency on
+     * other factors.
+     */
+    CUPTI_ERROR_INVALID_OPERATION = 7,
+    /**
+     * Unable to allocate enough memory to perform the requested
+     * operation.
+     */
+    CUPTI_ERROR_OUT_OF_MEMORY = 8,
+    /**
+     * An error occurred on the performance monitoring hardware.
+     */
+    CUPTI_ERROR_HARDWARE = 9,
+    /**
+     * The output buffer size is not sufficient to return all
+     * requested data.
+     */
+    CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT = 10,
+    /**
+     * The API is not implemented.
+     */
+    CUPTI_ERROR_API_NOT_IMPLEMENTED = 11,
+    /**
+     * The maximum limit is reached.
+     */
+    CUPTI_ERROR_MAX_LIMIT_REACHED = 12,
+    /**
+     * The object is not yet ready to perform the requested operation.
+     */
+    CUPTI_ERROR_NOT_READY = 13,
+    /**
+     * The current operation is not compatible with the current state
+     * of the object.
+     */
+    CUPTI_ERROR_NOT_COMPATIBLE = 14,
+    /**
+     * CUPTI is unable to initialize its connection to the CUDA
+     * driver.
+     */
+    CUPTI_ERROR_NOT_INITIALIZED = 15,
+    /**
+     * The metric id is invalid.
+     */
+    CUPTI_ERROR_INVALID_METRIC_ID = 16,
+    /**
+     * The metric name is invalid.
+     */
+    CUPTI_ERROR_INVALID_METRIC_NAME = 17,
+    /**
+     * The queue is empty.
+     */
+    CUPTI_ERROR_QUEUE_EMPTY = 18,
+    /**
+     * Invalid handle (internal?).
+     */
+    CUPTI_ERROR_INVALID_HANDLE = 19,
+    /**
+     * Invalid stream.
+     */
+    CUPTI_ERROR_INVALID_STREAM = 20,
+    /**
+     * Invalid kind.
+     */
+    CUPTI_ERROR_INVALID_KIND = 21,
+    /**
+     * Invalid event value.
+     */
+    CUPTI_ERROR_INVALID_EVENT_VALUE = 22,
+    /**
+     * CUPTI is disabled due to conflicts with other enabled profilers.
+     */
+    CUPTI_ERROR_DISABLED = 23,
+    /**
+     * Invalid module.
+     */
+    CUPTI_ERROR_INVALID_MODULE = 24,
+    /**
+     * Invalid metric value.
+     */
+    CUPTI_ERROR_INVALID_METRIC_VALUE = 25,
+    /**
+     * The performance monitoring hardware is in use by another client.
+     */
+    CUPTI_ERROR_HARDWARE_BUSY = 26,
+    /**
+     * The attempted operation is not supported on the current
+     * system or device.
+     */
+    CUPTI_ERROR_NOT_SUPPORTED = 27,
+    /**
+     * Unified memory profiling is not supported on the system.
+     * A potential reason could be an unsupported OS or architecture.
+     */
+    CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED = 28,
+    /**
+     * Unified memory profiling is not supported on the device.
+     */
+    CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE = 29,
+    /**
+     * Unified memory profiling is not supported on a multi-GPU
+     * configuration without P2P support between any pair of devices.
+     */
+    CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES = 30,
+    /**
+     * Unified memory profiling is not supported under the
+     * Multi-Process Service (MPS) environment. CUDA 7.5 removes this
+     * restriction.
+     */
+    CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS = 31,
+    /**
+     * In CUDA 9.0, devices with compute capability 7.0 don't
+     * support CDP tracing.
+     */
+    CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED = 32,
+    /**
+     * Profiling on a virtualized GPU is not supported.
+     */
+    CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 33,
+    /**
+     * Profiling results might be incorrect for CUDA applications
+     * compiled with an nvcc version older than 9.0 for devices with
+     * compute capability 6.0 and 6.1.
+     * The profiling session will continue, and CUPTI will notify the user via this error code.
+     * Users are advised to recompile the application code with nvcc version 9.0 or later.
+     * Ignore this warning if the code is already compiled with the recommended nvcc version.
+     */
+    CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE = 34,
+    /**
+     * The user doesn't have the sufficient privileges required to
+     * start the profiling session.
+     * One possible reason is that the NVIDIA driver or your system
+     * administrator has restricted access to the NVIDIA GPU performance counters.
+     * To learn how to resolve this issue and find more information, please visit
+     * https://developer.nvidia.com/CUPTI_ERROR_INSUFFICIENT_PRIVILEGES
+     */
+    CUPTI_ERROR_INSUFFICIENT_PRIVILEGES = 35,
+    /**
+     * The legacy CUPTI profiling APIs, i.e. the event API from the header cupti_events.h and
+     * the metric API from the header cupti_metrics.h, are not compatible with the
+     * Profiling API in the header cupti_profiler_target.h and the Perfworks metrics API
+     * in the headers nvperf_host.h and nvperf_target.h.
+     */
+    CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED = 36,
+    /**
+     * Missing definition of the OpenACC API routine in the linked OpenACC library.
+     *
+     * One possible reason is that the OpenACC library is linked statically in the
+     * user application, which might not have the definitions of all the OpenACC
+     * API routines needed for OpenACC profiling, as the compiler might ignore
+     * definitions for functions not used in the application. This issue
+     * can be mitigated by linking the OpenACC library dynamically.
+     */
+    CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE = 37,
+    /**
+     * The legacy CUPTI profiling APIs, i.e. the event API from the header cupti_events.h and
+     * the metric API from the header cupti_metrics.h, are not supported on devices with
+     * compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
+     * These APIs will be deprecated in a future CUDA release. They are replaced by the
+     * Profiling API in the header cupti_profiler_target.h and the Perfworks metrics API
+     * in the headers nvperf_host.h and nvperf_target.h.
+     */
+    CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED = 38,
+    /**
+     * CUPTI doesn't allow multiple callback subscribers. Only a single subscriber
+     * can be registered at a time.
+     * The same error code is used when the application is launched using NVIDIA tools
+     * like nvprof, Visual Profiler, Nsight Systems, Nsight Compute, cuda-gdb and
+     * cuda-memcheck.
+     */
+    CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED = 39,
+    /**
+     * Profiling on a virtualized GPU is not allowed by the hypervisor.
+     */
+    CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES = 40,
+    /**
+     * Profiling and tracing are not allowed when confidential computing mode
+     * is enabled.
+     */
+    CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED = 41,
+    /**
+     * CUPTI does not support NVIDIA Crypto Mining Processors (CMP).
+     * For more information, please visit https://developer.nvidia.com/ERR_NVCMPGPU
+     */
+    CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED = 42,
+    /**
+     * An unknown internal error has occurred.
+     */
+    CUPTI_ERROR_UNKNOWN = 999,
+    CUPTI_ERROR_FORCE_INT = 0x7fffffff
+} CUptiResult;
+
+/**
+ * \brief Get the descriptive string for a CUptiResult.
+ *
+ * Return the descriptive string for a CUptiResult in \p *str.
+ * \note \b Thread-safety: this function is thread safe.
+ *
+ * \param result The result to get the string for
+ * \param str Returns the string
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
+ * result is not a valid CUptiResult
+ */
+CUptiResult CUPTIAPI cuptiGetResultString(CUptiResult result, const char **str);
+
+/** @} */ /* END CUPTI_RESULT_API */
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility pop
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /*_CUPTI_RESULT_H_*/
+
+
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa608759184021e13e25144c666cd0e1a95ea7c6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h
@@ -0,0 +1,458 @@
+
+// *************************************************************************
+// Definitions of indices for API functions, unique across entire API
+// *************************************************************************
+
+// This file is generated. Any changes you make will be lost during the next clean build.
+// CUDA public interface, for type definitions and cu* function prototypes + +typedef enum CUpti_runtime_api_trace_cbid_enum { + CUPTI_RUNTIME_TRACE_CBID_INVALID = 0, + CUPTI_RUNTIME_TRACE_CBID_cudaDriverGetVersion_v3020 = 1, + CUPTI_RUNTIME_TRACE_CBID_cudaRuntimeGetVersion_v3020 = 2, + CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceCount_v3020 = 3, + CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v3020 = 4, + CUPTI_RUNTIME_TRACE_CBID_cudaChooseDevice_v3020 = 5, + CUPTI_RUNTIME_TRACE_CBID_cudaGetChannelDesc_v3020 = 6, + CUPTI_RUNTIME_TRACE_CBID_cudaCreateChannelDesc_v3020 = 7, + CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 = 8, + CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020 = 9, + CUPTI_RUNTIME_TRACE_CBID_cudaGetLastError_v3020 = 10, + CUPTI_RUNTIME_TRACE_CBID_cudaPeekAtLastError_v3020 = 11, + CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorString_v3020 = 12, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 = 13, + CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetCacheConfig_v3020 = 14, + CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetAttributes_v3020 = 15, + CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020 = 16, + CUPTI_RUNTIME_TRACE_CBID_cudaGetDevice_v3020 = 17, + CUPTI_RUNTIME_TRACE_CBID_cudaSetValidDevices_v3020 = 18, + CUPTI_RUNTIME_TRACE_CBID_cudaSetDeviceFlags_v3020 = 19, + CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 = 20, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocPitch_v3020 = 21, + CUPTI_RUNTIME_TRACE_CBID_cudaFree_v3020 = 22, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocArray_v3020 = 23, + CUPTI_RUNTIME_TRACE_CBID_cudaFreeArray_v3020 = 24, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocHost_v3020 = 25, + CUPTI_RUNTIME_TRACE_CBID_cudaFreeHost_v3020 = 26, + CUPTI_RUNTIME_TRACE_CBID_cudaHostAlloc_v3020 = 27, + CUPTI_RUNTIME_TRACE_CBID_cudaHostGetDevicePointer_v3020 = 28, + CUPTI_RUNTIME_TRACE_CBID_cudaHostGetFlags_v3020 = 29, + CUPTI_RUNTIME_TRACE_CBID_cudaMemGetInfo_v3020 = 30, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020 = 31, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_v3020 = 32, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_v3020 = 33, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_v3020 = 34, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_v3020 = 35, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_v3020 = 36, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_v3020 = 37, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_v3020 = 38, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_v3020 = 39, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_v3020 = 40, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020 = 41, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_v3020 = 42, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_v3020 = 43, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_v3020 = 44, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_v3020 = 45, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_v3020 = 46, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_v3020 = 47, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_v3020 = 48, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020 = 49, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_v3020 = 50, + CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020 = 51, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_v3020 = 52, + CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolAddress_v3020 = 53, + CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolSize_v3020 = 54, + CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture_v3020 = 55, + CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture2D_v3020 = 56, + CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToArray_v3020 = 57, + CUPTI_RUNTIME_TRACE_CBID_cudaUnbindTexture_v3020 = 58, + 
CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureAlignmentOffset_v3020 = 59, + CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureReference_v3020 = 60, + CUPTI_RUNTIME_TRACE_CBID_cudaBindSurfaceToArray_v3020 = 61, + CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceReference_v3020 = 62, + CUPTI_RUNTIME_TRACE_CBID_cudaGLSetGLDevice_v3020 = 63, + CUPTI_RUNTIME_TRACE_CBID_cudaGLRegisterBufferObject_v3020 = 64, + CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObject_v3020 = 65, + CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObject_v3020 = 66, + CUPTI_RUNTIME_TRACE_CBID_cudaGLUnregisterBufferObject_v3020 = 67, + CUPTI_RUNTIME_TRACE_CBID_cudaGLSetBufferObjectMapFlags_v3020 = 68, + CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObjectAsync_v3020 = 69, + CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObjectAsync_v3020 = 70, + CUPTI_RUNTIME_TRACE_CBID_cudaWGLGetDevice_v3020 = 71, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterImage_v3020 = 72, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020 = 73, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnregisterResource_v3020 = 74, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceSetMapFlags_v3020 = 75, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsMapResources_v3020 = 76, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnmapResources_v3020 = 77, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedPointer_v3020 = 78, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsSubResourceGetMappedArray_v3020 = 79, + CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUGetDevice_v3020 = 80, + CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUSetVDPAUDevice_v3020 = 81, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020 = 82, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterOutputSurface_v3020 = 83, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevice_v3020 = 84, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevices_v3020 = 85, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D11SetDirect3DDevice_v3020 = 86, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D11RegisterResource_v3020 = 87, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevice_v3020 = 88, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevices_v3020 = 89, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10SetDirect3DDevice_v3020 = 90, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D10RegisterResource_v3020 = 91, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10RegisterResource_v3020 = 92, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnregisterResource_v3020 = 93, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10MapResources_v3020 = 94, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnmapResources_v3020 = 95, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceSetMapFlags_v3020 = 96, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetSurfaceDimensions_v3020 = 97, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedArray_v3020 = 98, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPointer_v3020 = 99, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedSize_v3020 = 100, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPitch_v3020 = 101, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevice_v3020 = 102, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevices_v3020 = 103, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9SetDirect3DDevice_v3020 = 104, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDirect3DDevice_v3020 = 105, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D9RegisterResource_v3020 = 106, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterResource_v3020 = 107, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterResource_v3020 = 108, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapResources_v3020 = 109, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapResources_v3020 = 110, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceSetMapFlags_v3020 = 111, + 
CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetSurfaceDimensions_v3020 = 112, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedArray_v3020 = 113, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPointer_v3020 = 114, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedSize_v3020 = 115, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPitch_v3020 = 116, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9Begin_v3020 = 117, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9End_v3020 = 118, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterVertexBuffer_v3020 = 119, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterVertexBuffer_v3020 = 120, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapVertexBuffer_v3020 = 121, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapVertexBuffer_v3020 = 122, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadExit_v3020 = 123, + CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForDevice_v3020 = 124, + CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForHost_v3020 = 125, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadSynchronize_v3020 = 126, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetLimit_v3020 = 127, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetLimit_v3020 = 128, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreate_v3020 = 129, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v3020 = 130, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_v3020 = 131, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_v3020 = 132, + CUPTI_RUNTIME_TRACE_CBID_cudaEventCreate_v3020 = 133, + CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateWithFlags_v3020 = 134, + CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_v3020 = 135, + CUPTI_RUNTIME_TRACE_CBID_cudaEventDestroy_v3020 = 136, + CUPTI_RUNTIME_TRACE_CBID_cudaEventSynchronize_v3020 = 137, + CUPTI_RUNTIME_TRACE_CBID_cudaEventQuery_v3020 = 138, + CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v3020 = 139, + CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3D_v3020 = 140, + CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3DArray_v3020 = 141, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_v3020 = 142, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_v3020 = 143, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_v3020 = 144, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_v3020 = 145, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetCacheConfig_v3020 = 146, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_v3020 = 147, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDirect3DDevice_v3020 = 148, + CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDirect3DDevice_v3020 = 149, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetCacheConfig_v3020 = 150, + CUPTI_RUNTIME_TRACE_CBID_cudaPointerGetAttributes_v4000 = 151, + CUPTI_RUNTIME_TRACE_CBID_cudaHostRegister_v4000 = 152, + CUPTI_RUNTIME_TRACE_CBID_cudaHostUnregister_v4000 = 153, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceCanAccessPeer_v4000 = 154, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceEnablePeerAccess_v4000 = 155, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceDisablePeerAccess_v4000 = 156, + CUPTI_RUNTIME_TRACE_CBID_cudaPeerRegister_v4000 = 157, + CUPTI_RUNTIME_TRACE_CBID_cudaPeerUnregister_v4000 = 158, + CUPTI_RUNTIME_TRACE_CBID_cudaPeerGetDevicePointer_v4000 = 159, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeer_v4000 = 160, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeerAsync_v4000 = 161, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_v4000 = 162, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_v4000 = 163, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceReset_v3020 = 164, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020 = 165, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetLimit_v3020 = 166, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetLimit_v3020 = 167, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetCacheConfig_v3020 = 168, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetCacheConfig_v3020 = 
169, + CUPTI_RUNTIME_TRACE_CBID_cudaProfilerInitialize_v4000 = 170, + CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStart_v4000 = 171, + CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStop_v4000 = 172, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetByPCIBusId_v4010 = 173, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetPCIBusId_v4010 = 174, + CUPTI_RUNTIME_TRACE_CBID_cudaGLGetDevices_v4010 = 175, + CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetEventHandle_v4010 = 176, + CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenEventHandle_v4010 = 177, + CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetMemHandle_v4010 = 178, + CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenMemHandle_v4010 = 179, + CUPTI_RUNTIME_TRACE_CBID_cudaIpcCloseMemHandle_v4010 = 180, + CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetInfo_v4010 = 181, + CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetSharedMemConfig_v4020 = 182, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetSharedMemConfig_v4020 = 183, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetSharedMemConfig_v4020 = 184, + CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v5000 = 185, + CUPTI_RUNTIME_TRACE_CBID_cudaDestroyTextureObject_v5000 = 186, + CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceDesc_v5000 = 187, + CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v5000 = 188, + CUPTI_RUNTIME_TRACE_CBID_cudaCreateSurfaceObject_v5000 = 189, + CUPTI_RUNTIME_TRACE_CBID_cudaDestroySurfaceObject_v5000 = 190, + CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceObjectResourceDesc_v5000 = 191, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocMipmappedArray_v5000 = 192, + CUPTI_RUNTIME_TRACE_CBID_cudaGetMipmappedArrayLevel_v5000 = 193, + CUPTI_RUNTIME_TRACE_CBID_cudaFreeMipmappedArray_v5000 = 194, + CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToMipmappedArray_v5000 = 195, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedMipmappedArray_v5000 = 196, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_v5000 = 197, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithFlags_v5000 = 198, + CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceViewDesc_v5000 = 199, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetAttribute_v5000 = 200, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v5050 = 201, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithPriority_v5050 = 202, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_v5050 = 203, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_v5050 = 204, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetStreamPriorityRange_v5050 = 205, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocManaged_v6000 = 206, + CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000 = 207, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_v6000 = 208, + CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorName_v6050 = 209, + CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050 = 210, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 = 211, + CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceFlags_v7000 = 212, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_ptsz_v7000 = 213, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_ptsz_v7000 = 214, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_ptds_v7000 = 215, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_ptds_v7000 = 216, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_ptds_v7000 = 217, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_ptds_v7000 = 218, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_ptds_v7000 = 219, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_ptds_v7000 = 220, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_ptds_v7000 = 221, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_ptds_v7000 = 222, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_ptds_v7000 = 223, + 
CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_ptds_v7000 = 224, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_ptsz_v7000 = 225, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_ptsz_v7000 = 226, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_ptsz_v7000 = 227, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_ptsz_v7000 = 228, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_ptsz_v7000 = 229, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_ptsz_v7000 = 230, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_ptsz_v7000 = 231, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_ptsz_v7000 = 232, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset_ptds_v7000 = 233, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_ptds_v7000 = 234, + CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_ptsz_v7000 = 235, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_ptsz_v7000 = 236, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_ptsz_v7000 = 237, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_ptsz_v7000 = 238, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_ptsz_v7000 = 239, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_ptsz_v7000 = 240, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_ptsz_v7000 = 241, + CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_ptsz_v7000 = 242, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_ptds_v7000 = 243, + CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_ptsz_v7000 = 244, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_ptds_v7000 = 245, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_ptsz_v7000 = 246, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_ptsz_v7000 = 247, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_ptsz_v7000 = 248, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_ptds_v7000 = 249, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_ptsz_v7000 = 250, + CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 = 251, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v8000 = 252, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_ptsz_v8000 = 253, + CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v8000 = 254, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetP2PAttribute_v8000 = 255, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsEGLRegisterImage_v7000 = 256, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnect_v7000 = 257, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerDisconnect_v7000 = 258, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerAcquireFrame_v7000 = 259, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerReleaseFrame_v7000 = 260, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerConnect_v7000 = 261, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerDisconnect_v7000 = 262, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerPresentFrame_v7000 = 263, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerReturnFrame_v7000 = 264, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedEglFrame_v7000 = 265, + CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttribute_v8000 = 266, + CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttributes_v8000 = 267, + CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnectWithFlags_v7000 = 268, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000 = 269, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_ptsz_v9000 = 270, + CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateFromEGLSync_v9000 = 271, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000 = 272, + CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetAttribute_v9000 = 273, + CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalMemory_v10000 = 274, + CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedBuffer_v10000 = 275, + 
CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedMipmappedArray_v10000 = 276, + CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalMemory_v10000 = 277, + CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalSemaphore_v10000 = 278, + CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v10000 = 279, + CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_ptsz_v10000 = 280, + CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v10000 = 281, + CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_ptsz_v10000 = 282, + CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalSemaphore_v10000 = 283, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_v10000 = 284, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_ptsz_v10000 = 285, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphCreate_v10000 = 286, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetParams_v10000 = 287, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetParams_v10000 = 288, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddKernelNode_v10000 = 289, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode_v10000 = 290, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeGetParams_v10000 = 291, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams_v10000 = 292, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemsetNode_v10000 = 293, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeGetParams_v10000 = 294, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeSetParams_v10000 = 295, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddHostNode_v10000 = 296, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeGetParams_v10000 = 297, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddChildGraphNode_v10000 = 298, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphChildGraphNodeGetGraph_v10000 = 299, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEmptyNode_v10000 = 300, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphClone_v10000 = 301, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeFindInClone_v10000 = 302, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetType_v10000 = 303, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetRootNodes_v10000 = 304, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v10000 = 305, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v10000 = 306, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v10000 = 307, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v10000 = 308, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroyNode_v10000 = 309, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v10000 = 310, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_v10000 = 311, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_ptsz_v10000 = 312, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecDestroy_v10000 = 313, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroy_v10000 = 314, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_v10000 = 315, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_ptsz_v10000 = 316, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_v10000 = 317, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_ptsz_v10000 = 318, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_v10000 = 319, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_ptsz_v10000 = 320, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeSetParams_v10000 = 321, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetNodes_v10000 = 322, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v10000 = 323, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v10010 = 324, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_ptsz_v10010 = 325, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecKernelNodeSetParams_v10010 = 326, + CUPTI_RUNTIME_TRACE_CBID_cudaThreadExchangeStreamCaptureMode_v10010 = 327, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetNvSciSyncAttributes_v10020 = 328, + 
CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyAvailableDynamicSMemPerBlock_v10200 = 329, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_v10200 = 330, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_ptsz_v10200 = 331, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams_v10020 = 332, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemsetNodeSetParams_v10020 = 333, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecHostNodeSetParams_v10020 = 334, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecUpdate_v10020 = 335, + CUPTI_RUNTIME_TRACE_CBID_cudaGetFuncBySymbol_v11000 = 336, + CUPTI_RUNTIME_TRACE_CBID_cudaCtxResetPersistingL2Cache_v11000 = 337, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeCopyAttributes_v11000 = 338, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetAttribute_v11000 = 339, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetAttribute_v11000 = 340, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_v11000 = 341, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_ptsz_v11000 = 342, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_v11000 = 343, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_ptsz_v11000 = 344, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_v11000 = 345, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_ptsz_v11000 = 346, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetTexture1DLinearMaxWidth_v11010 = 347, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_v10000 = 348, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_ptsz_v10000 = 349, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeToSymbol_v11010 = 350, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeFromSymbol_v11010 = 351, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode1D_v11010 = 352, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsToSymbol_v11010 = 353, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010 = 354, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams1D_v11010 = 355, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010 = 356, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010 = 357, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams1D_v11010 = 358, + CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetSparseProperties_v11010 = 359, + CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetSparseProperties_v11010 = 360, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecChildGraphNodeSetParams_v11010 = 361, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventRecordNode_v11010 = 362, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeGetEvent_v11010 = 363, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeSetEvent_v11010 = 364, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventWaitNode_v11010 = 365, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeGetEvent_v11010 = 366, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeSetEvent_v11010 = 367, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventRecordNodeSetEvent_v11010 = 368, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventWaitNodeSetEvent_v11010 = 369, + CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_v11010 = 370, + CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_ptsz_v11010 = 371, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetDefaultMemPool_v11020 = 372, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_v11020 = 373, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_ptsz_v11020 = 374, + CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_v11020 = 375, + CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_ptsz_v11020 = 376, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolTrimTo_v11020 = 377, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAttribute_v11020 = 378, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAttribute_v11020 
= 379, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAccess_v11020 = 380, + CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetPlane_v11020 = 381, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAccess_v11020 = 382, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolCreate_v11020 = 383, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolDestroy_v11020 = 384, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetMemPool_v11020 = 385, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetMemPool_v11020 = 386, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportToShareableHandle_v11020 = 387, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportFromShareableHandle_v11020 = 388, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportPointer_v11020 = 389, + CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportPointer_v11020 = 390, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_v11020 = 391, + CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_ptsz_v11020 = 392, + CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_v11020 = 393, + CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020 = 394, + CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_v11020 = 395, + CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020 = 396, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresSignalNode_v11020 = 397, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020 = 398, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020 = 399, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresWaitNode_v11020 = 400, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020 = 401, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020 = 402, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020 = 403, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020 = 404, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceFlushGPUDirectRDMAWrites_v11030 = 405, + CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_v11030 = 406, + CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_ptsz_v11030 = 407, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphDebugDotPrint_v11030 = 408, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_v11030 = 409, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_ptsz_v11030 = 410, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v11030 = 411, + CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_ptsz_v11030 = 412, + CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectCreate_v11030 = 413, + CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRetain_v11030 = 414, + CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRelease_v11030 = 415, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphRetainUserObject_v11030 = 416, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphReleaseUserObject_v11030 = 417, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithFlags_v11040 = 418, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemAllocNode_v11040 = 419, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemAllocNodeGetParams_v11040 = 420, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemFreeNode_v11040 = 421, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemFreeNodeGetParams_v11040 = 422, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGraphMemTrim_v11040 = 423, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetGraphMemAttribute_v11040 = 424, + CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetGraphMemAttribute_v11040 = 425, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetEnabled_v11060 = 426, + CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetEnabled_v11060 = 427, + CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetMemoryRequirements_v11060 = 428, + 
CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetMemoryRequirements_v11060 = 429,
+    CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_v11060 = 430,
+    CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_ptsz_v11060 = 431,
+    CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxPotentialClusterSize_v11070 = 432,
+    CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveClusters_v11070 = 433,
+    CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v2_v11080 = 434,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v2_v11080 = 435,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_v12000 = 436,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_ptsz_v12000 = 437,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecGetFlags_v12000 = 438,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGetKernel_v12000 = 439,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v2_v12000 = 440,
+    CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_v12000 = 441,
+    CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_ptsz_v12000 = 442,
+    CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v12000 = 443,
+    CUPTI_RUNTIME_TRACE_CBID_cudaInitDevice_v12000 = 444,
+    CUPTI_RUNTIME_TRACE_CBID_SIZE = 445,
+    CUPTI_RUNTIME_TRACE_CBID_FORCE_INT = 0x7fffffff
+} CUpti_runtime_api_trace_cbid;
+
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4b625d45c65288fa2ea7dc05819ee4dfc4cbdd3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h
@@ -0,0 +1,43 @@
+#if !defined(_CUPTI_TARGET_H_)
+#define _CUPTI_TARGET_H_
+
+/*
+CUPTI profiler target APIs.
+This file contains the CUPTI profiling APIs.
+*/
+#include <stddef.h>
+#include <stdint.h>
+#include <cupti_result.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility push(default)
+#endif
+
+#ifndef CUPTI_PROFILER_STRUCT_SIZE
+#define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
+#endif
+
+typedef struct CUpti_Device_GetChipName_Params
+{
+    size_t structSize; //!< [in]
+    void* pPriv;       //!< [in] assign to NULL
+
+    size_t deviceIndex;    //!< [in]
+    const char* pChipName; //!< [out]
+} CUpti_Device_GetChipName_Params;
+
+#define CUpti_Device_GetChipName_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Device_GetChipName_Params, pChipName)
+CUptiResult CUPTIAPI cuptiDeviceGetChipName(CUpti_Device_GetChipName_Params *pParams);
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility pop
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+#endif
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_version.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_version.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef8c6a1192db8ef7879d316aa48d5428f7f9b97e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_version.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2010-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(_CUPTI_VERSION_H_) +#define _CUPTI_VERSION_H_ + +#include <cuda.h> +#include <cupti_result.h> + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \defgroup CUPTI_VERSION_API CUPTI Version + * Function and macro to determine the CUPTI version. + * @{ + */ + +/** + * \brief The API version for this implementation of CUPTI. + * + * The API version for this implementation of CUPTI. This define along + * with \ref cuptiGetVersion can be used to dynamically detect if the + * version of CUPTI compiled against matches the version of the loaded + * CUPTI library. 
+ * + * v1 : CUDAToolsSDK 4.0 + * v2 : CUDAToolsSDK 4.1 + * v3 : CUDA Toolkit 5.0 + * v4 : CUDA Toolkit 5.5 + * v5 : CUDA Toolkit 6.0 + * v6 : CUDA Toolkit 6.5 + * v7 : CUDA Toolkit 6.5 (with sm_52 support) + * v8 : CUDA Toolkit 7.0 + * v9 : CUDA Toolkit 8.0 + * v10 : CUDA Toolkit 9.0 + * v11 : CUDA Toolkit 9.1 + * v12 : CUDA Toolkit 10.0, 10.1 and 10.2 + * v13 : CUDA Toolkit 11.0 + * v14 : CUDA Toolkit 11.1 + * v15 : CUDA Toolkit 11.2, 11.3 and 11.4 + * v16 : CUDA Toolkit 11.5 + * v17 : CUDA Toolkit 11.6 + * v18 : CUDA Toolkit 11.8 + * v19 : CUDA Toolkit 12.0 + */ +#define CUPTI_API_VERSION 19 + +/** + * \brief Get the CUPTI API version. + * + * Return the API version in \p *version. + * + * \param version Returns the version + * + * \retval CUPTI_SUCCESS on success + * \retval CUPTI_ERROR_INVALID_PARAMETER if \p version is NULL + * \sa CUPTI_API_VERSION + */ +CUptiResult CUPTIAPI cuptiGetVersion(uint32_t *version); + +/** @} */ /* END CUPTI_VERSION_API */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_VERSION_H_*/ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7a52e194b265d32f61d47bd3081f4958755bff46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h @@ -0,0 +1,116 @@ +// This file is generated. Any changes you make will be lost during the next clean build. + +// Dependent includes +#ifdef __APPLE__ +#include <OpenGL/gl.h> +#else +#include <GL/gl.h> +#endif + +// CUDA public interface, for type definitions and cu* function prototypes +#include "cudaGL.h" + + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +typedef struct cuGraphicsGLRegisterBuffer_params_st { + CUgraphicsResource *pCudaResource; + GLuint buffer; + unsigned int Flags; +} cuGraphicsGLRegisterBuffer_params; + +typedef struct cuGraphicsGLRegisterImage_params_st { + CUgraphicsResource *pCudaResource; + GLuint image; + GLenum target; + unsigned int Flags; +} cuGraphicsGLRegisterImage_params; + +typedef struct cuGLGetDevices_v2_params_st { + unsigned int *pCudaDeviceCount; + CUdevice *pCudaDevices; + unsigned int cudaDeviceCount; + CUGLDeviceList deviceList; +} cuGLGetDevices_v2_params; + +typedef struct cuGLCtxCreate_v2_params_st { + CUcontext *pCtx; + unsigned int Flags; + CUdevice device; +} cuGLCtxCreate_v2_params; + +typedef struct cuGLRegisterBufferObject_params_st { + GLuint buffer; +} cuGLRegisterBufferObject_params; + +typedef struct cuGLMapBufferObject_v2_ptds_params_st { + CUdeviceptr *dptr; + size_t *size; + GLuint buffer; +} cuGLMapBufferObject_v2_ptds_params; + +typedef struct cuGLUnmapBufferObject_params_st { + GLuint buffer; +} cuGLUnmapBufferObject_params; + +typedef struct cuGLUnregisterBufferObject_params_st { + GLuint buffer; +} cuGLUnregisterBufferObject_params; + +typedef struct cuGLSetBufferObjectMapFlags_params_st { + GLuint buffer; + unsigned int Flags; +} cuGLSetBufferObjectMapFlags_params; + +typedef struct cuGLMapBufferObjectAsync_v2_ptsz_params_st { + CUdeviceptr *dptr; + size_t *size; + GLuint buffer; + CUstream hStream; +} cuGLMapBufferObjectAsync_v2_ptsz_params; + +typedef struct 
cuGLUnmapBufferObjectAsync_params_st { + GLuint buffer; + CUstream hStream; +} cuGLUnmapBufferObjectAsync_params; + +typedef struct cuGLGetDevices_params_st { + unsigned int *pCudaDeviceCount; + CUdevice *pCudaDevices; + unsigned int cudaDeviceCount; + CUGLDeviceList deviceList; +} cuGLGetDevices_params; + +typedef struct cuGLMapBufferObject_v2_params_st { + CUdeviceptr *dptr; + size_t *size; + GLuint buffer; +} cuGLMapBufferObject_v2_params; + +typedef struct cuGLMapBufferObjectAsync_v2_params_st { + CUdeviceptr *dptr; + size_t *size; + GLuint buffer; + CUstream hStream; +} cuGLMapBufferObjectAsync_v2_params; + +typedef struct cuGLCtxCreate_params_st { + CUcontext *pCtx; + unsigned int Flags; + CUdevice device; +} cuGLCtxCreate_params; + +typedef struct cuGLMapBufferObject_params_st { + CUdeviceptr_v1 *dptr; + unsigned int *size; + GLuint buffer; +} cuGLMapBufferObject_params; + +typedef struct cuGLMapBufferObjectAsync_params_st { + CUdeviceptr_v1 *dptr; + unsigned int *size; + GLuint buffer; + CUstream hStream; +} cuGLMapBufferObjectAsync_params; diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..abc603c8d9be21e012a9b1641330c2e203d623b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h @@ -0,0 +1,46 @@ +// This file is generated. Any changes you make will be lost during the next clean build. + +// Dependent includes +#include <vdpau/vdpau.h> + +// CUDA public interface, for type definitions and cu* function prototypes +#include "cudaVDPAU.h" + + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +typedef struct cuVDPAUGetDevice_params_st { + CUdevice *pDevice; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cuVDPAUGetDevice_params; + +typedef struct cuVDPAUCtxCreate_v2_params_st { + CUcontext *pCtx; + unsigned int flags; + CUdevice device; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cuVDPAUCtxCreate_v2_params; + +typedef struct cuGraphicsVDPAURegisterVideoSurface_params_st { + CUgraphicsResource *pCudaResource; + VdpVideoSurface vdpSurface; + unsigned int flags; +} cuGraphicsVDPAURegisterVideoSurface_params; + +typedef struct cuGraphicsVDPAURegisterOutputSurface_params_st { + CUgraphicsResource *pCudaResource; + VdpOutputSurface vdpSurface; + unsigned int flags; +} cuGraphicsVDPAURegisterOutputSurface_params; + +typedef struct cuVDPAUCtxCreate_params_st { + CUcontext *pCtx; + unsigned int flags; + CUdevice device; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cuVDPAUCtxCreate_params; diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..eaba3ac5a760e338f1edc191609f6fa2a32adee7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h @@ -0,0 +1,71 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
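The cupti_version.h header above pairs the compile-time CUPTI_API_VERSION macro with the run-time cuptiGetVersion() entry point precisely so a tool can detect when it was compiled against one version of CUPTI but loaded another, as its documentation comment describes. A minimal sketch of that check (the program structure and error handling here are illustrative, not part of the vendored headers; cupti.h is the umbrella header that pulls in cupti_version.h and cupti_result.h):

#include <stdint.h>
#include <stdio.h>
#include <cupti.h>

int main(void)
{
    uint32_t loaded = 0;
    /* Ask the CUPTI library loaded at run time for its API version. */
    CUptiResult status = cuptiGetVersion(&loaded);
    if (status != CUPTI_SUCCESS) {
        fprintf(stderr, "cuptiGetVersion failed: %d\n", (int)status);
        return 1;
    }
    /* CUPTI_API_VERSION is the version this binary was compiled against. */
    if (loaded != CUPTI_API_VERSION) {
        fprintf(stderr, "CUPTI mismatch: compiled against v%d, loaded v%u\n",
                CUPTI_API_VERSION, (unsigned)loaded);
        return 1;
    }
    printf("CUPTI API version %u\n", (unsigned)loaded);
    return 0;
}

Per the documentation block above, the only defined failure is CUPTI_ERROR_INVALID_PARAMETER for a NULL pointer, so in practice the mismatch branch is the one doing the useful work.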
+ +// CUDA public interface, for type definitions and api function prototypes +#include "cuda_gl_interop.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaGLGetDevices_v4010_params_st { + unsigned int *pCudaDeviceCount; + int *pCudaDevices; + unsigned int cudaDeviceCount; + enum cudaGLDeviceList deviceList; +} cudaGLGetDevices_v4010_params; + +typedef struct cudaGraphicsGLRegisterImage_v3020_params_st { + struct cudaGraphicsResource **resource; + GLuint image; + GLenum target; + unsigned int flags; +} cudaGraphicsGLRegisterImage_v3020_params; + +typedef struct cudaGraphicsGLRegisterBuffer_v3020_params_st { + struct cudaGraphicsResource **resource; + GLuint buffer; + unsigned int flags; +} cudaGraphicsGLRegisterBuffer_v3020_params; + +typedef struct cudaGLSetGLDevice_v3020_params_st { + int device; +} cudaGLSetGLDevice_v3020_params; + +typedef struct cudaGLRegisterBufferObject_v3020_params_st { + GLuint bufObj; +} cudaGLRegisterBufferObject_v3020_params; + +typedef struct cudaGLMapBufferObject_v3020_params_st { + void **devPtr; + GLuint bufObj; +} cudaGLMapBufferObject_v3020_params; + +typedef struct cudaGLUnmapBufferObject_v3020_params_st { + GLuint bufObj; +} cudaGLUnmapBufferObject_v3020_params; + +typedef struct cudaGLUnregisterBufferObject_v3020_params_st { + GLuint bufObj; +} cudaGLUnregisterBufferObject_v3020_params; + +typedef struct cudaGLSetBufferObjectMapFlags_v3020_params_st { + GLuint bufObj; + unsigned int flags; +} cudaGLSetBufferObjectMapFlags_v3020_params; + +typedef struct cudaGLMapBufferObjectAsync_v3020_params_st { + void **devPtr; + GLuint bufObj; + cudaStream_t stream; +} cudaGLMapBufferObjectAsync_v3020_params; + +typedef struct cudaGLUnmapBufferObjectAsync_v3020_params_st { + GLuint bufObj; + cudaStream_t stream; +} cudaGLUnmapBufferObjectAsync_v3020_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c81882bbf16477a11c3fe76f274978f282da1a3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h @@ -0,0 +1,3293 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
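Each *_params struct in the interop header above mirrors the argument list of one runtime API entry point; inside a CUPTI callback, CUpti_CallbackData::functionParams points at exactly this layout for the intercepted call. Below is a sketch of a runtime-domain callback that inspects cudaGraphicsGLRegisterBuffer through the cudaGraphicsGLRegisterBuffer_v3020_params struct defined above. This is a hedged fragment: it assumes the generated header is on the include path and that the matching constant in cupti_runtime_cbid.h follows the usual CUPTI_RUNTIME_TRACE_CBID_<function>_v3020 naming.

#include <stdio.h>
#include <cupti.h>
#include "generated_cuda_gl_interop_meta.h"

/* Invoked by CUPTI on entry and exit of every subscribed runtime API call. */
static void CUPTIAPI onRuntimeApi(void *userdata, CUpti_CallbackDomain domain,
                                  CUpti_CallbackId cbid,
                                  const CUpti_CallbackData *info)
{
    if (domain != CUPTI_CB_DOMAIN_RUNTIME_API ||
        info->callbackSite != CUPTI_API_ENTER)
        return;
    if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020) {
        /* functionParams points at the struct generated for this entry point. */
        const cudaGraphicsGLRegisterBuffer_v3020_params *p =
            (const cudaGraphicsGLRegisterBuffer_v3020_params *)info->functionParams;
        printf("registering GL buffer %u with flags 0x%x\n",
               (unsigned)p->buffer, p->flags);
    }
}

The _v3020/_v4010 suffixes record the CUDA toolkit version (3.2, 4.1, and so on) in which each entry point acquired its current signature, which is why cudaGLGetDevices carries _v4010 while the older registration calls carry _v3020.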
+ +// No dependent includes + +// CUDA public interface, for type definitions and cu* function prototypes +#include "cuda.h" + + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +typedef struct cuGetErrorString_params_st { + CUresult error; + const char **pStr; +} cuGetErrorString_params; + +typedef struct cuGetErrorName_params_st { + CUresult error; + const char **pStr; +} cuGetErrorName_params; + +typedef struct cuInit_params_st { + unsigned int Flags; +} cuInit_params; + +typedef struct cuDriverGetVersion_params_st { + int *driverVersion; +} cuDriverGetVersion_params; + +typedef struct cuDeviceGet_params_st { + CUdevice *device; + int ordinal; +} cuDeviceGet_params; + +typedef struct cuDeviceGetCount_params_st { + int *count; +} cuDeviceGetCount_params; + +typedef struct cuDeviceGetName_params_st { + char *name; + int len; + CUdevice dev; +} cuDeviceGetName_params; + +typedef struct cuDeviceGetUuid_params_st { + CUuuid *uuid; + CUdevice dev; +} cuDeviceGetUuid_params; + +typedef struct cuDeviceGetUuid_v2_params_st { + CUuuid *uuid; + CUdevice dev; +} cuDeviceGetUuid_v2_params; + +typedef struct cuDeviceGetLuid_params_st { + char *luid; + unsigned int *deviceNodeMask; + CUdevice dev; +} cuDeviceGetLuid_params; + +typedef struct cuDeviceTotalMem_v2_params_st { + size_t *bytes; + CUdevice dev; +} cuDeviceTotalMem_v2_params; + +typedef struct cuDeviceGetTexture1DLinearMaxWidth_params_st { + size_t *maxWidthInElements; + CUarray_format format; + unsigned numChannels; + CUdevice dev; +} cuDeviceGetTexture1DLinearMaxWidth_params; + +typedef struct cuDeviceGetAttribute_params_st { + int *pi; + CUdevice_attribute attrib; + CUdevice dev; +} cuDeviceGetAttribute_params; + +typedef struct cuDeviceGetNvSciSyncAttributes_params_st { + void *nvSciSyncAttrList; + CUdevice dev; + int flags; +} cuDeviceGetNvSciSyncAttributes_params; + +typedef struct cuDeviceSetMemPool_params_st { + CUdevice dev; + CUmemoryPool pool; +} cuDeviceSetMemPool_params; + +typedef struct cuDeviceGetMemPool_params_st { + CUmemoryPool *pool; + CUdevice dev; +} cuDeviceGetMemPool_params; + +typedef struct cuDeviceGetDefaultMemPool_params_st { + CUmemoryPool *pool_out; + CUdevice dev; +} cuDeviceGetDefaultMemPool_params; + +typedef struct cuDeviceGetExecAffinitySupport_params_st { + int *pi; + CUexecAffinityType type; + CUdevice dev; +} cuDeviceGetExecAffinitySupport_params; + +typedef struct cuFlushGPUDirectRDMAWrites_params_st { + CUflushGPUDirectRDMAWritesTarget target; + CUflushGPUDirectRDMAWritesScope scope; +} cuFlushGPUDirectRDMAWrites_params; + +typedef struct cuDeviceGetProperties_params_st { + CUdevprop *prop; + CUdevice dev; +} cuDeviceGetProperties_params; + +typedef struct cuDeviceComputeCapability_params_st { + int *major; + int *minor; + CUdevice dev; +} cuDeviceComputeCapability_params; + +typedef struct cuDevicePrimaryCtxRetain_params_st { + CUcontext *pctx; + CUdevice dev; +} cuDevicePrimaryCtxRetain_params; + +typedef struct cuDevicePrimaryCtxRelease_v2_params_st { + CUdevice dev; +} cuDevicePrimaryCtxRelease_v2_params; + +typedef struct cuDevicePrimaryCtxSetFlags_v2_params_st { + CUdevice dev; + unsigned int flags; +} cuDevicePrimaryCtxSetFlags_v2_params; + +typedef struct cuDevicePrimaryCtxGetState_params_st { + CUdevice dev; + unsigned int *flags; + int *active; +} cuDevicePrimaryCtxGetState_params; + +typedef struct 
cuDevicePrimaryCtxReset_v2_params_st { + CUdevice dev; +} cuDevicePrimaryCtxReset_v2_params; + +typedef struct cuCtxCreate_v2_params_st { + CUcontext *pctx; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_v2_params; + +typedef struct cuCtxCreate_v3_params_st { + CUcontext *pctx; + CUexecAffinityParam *paramsArray; + int numParams; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_v3_params; + +typedef struct cuCtxDestroy_v2_params_st { + CUcontext ctx; +} cuCtxDestroy_v2_params; + +typedef struct cuCtxPushCurrent_v2_params_st { + CUcontext ctx; +} cuCtxPushCurrent_v2_params; + +typedef struct cuCtxPopCurrent_v2_params_st { + CUcontext *pctx; +} cuCtxPopCurrent_v2_params; + +typedef struct cuCtxSetCurrent_params_st { + CUcontext ctx; +} cuCtxSetCurrent_params; + +typedef struct cuCtxGetCurrent_params_st { + CUcontext *pctx; +} cuCtxGetCurrent_params; + +typedef struct cuCtxGetDevice_params_st { + CUdevice *device; +} cuCtxGetDevice_params; + +typedef struct cuCtxGetFlags_params_st { + unsigned int *flags; +} cuCtxGetFlags_params; + +typedef struct cuCtxSetFlags_params_st { + unsigned int flags; +} cuCtxSetFlags_params; + +typedef struct cuCtxGetId_params_st { + CUcontext ctx; + unsigned long long *ctxId; +} cuCtxGetId_params; + +typedef struct cuCtxSetLimit_params_st { + CUlimit limit; + size_t value; +} cuCtxSetLimit_params; + +typedef struct cuCtxGetLimit_params_st { + size_t *pvalue; + CUlimit limit; +} cuCtxGetLimit_params; + +typedef struct cuCtxGetCacheConfig_params_st { + CUfunc_cache *pconfig; +} cuCtxGetCacheConfig_params; + +typedef struct cuCtxSetCacheConfig_params_st { + CUfunc_cache config; +} cuCtxSetCacheConfig_params; + +typedef struct cuCtxGetSharedMemConfig_params_st { + CUsharedconfig *pConfig; +} cuCtxGetSharedMemConfig_params; + +typedef struct cuCtxSetSharedMemConfig_params_st { + CUsharedconfig config; +} cuCtxSetSharedMemConfig_params; + +typedef struct cuCtxGetApiVersion_params_st { + CUcontext ctx; + unsigned int *version; +} cuCtxGetApiVersion_params; + +typedef struct cuCtxGetStreamPriorityRange_params_st { + int *leastPriority; + int *greatestPriority; +} cuCtxGetStreamPriorityRange_params; + +typedef struct cuCtxGetExecAffinity_params_st { + CUexecAffinityParam *pExecAffinity; + CUexecAffinityType type; +} cuCtxGetExecAffinity_params; + +typedef struct cuCtxAttach_params_st { + CUcontext *pctx; + unsigned int flags; +} cuCtxAttach_params; + +typedef struct cuCtxDetach_params_st { + CUcontext ctx; +} cuCtxDetach_params; + +typedef struct cuModuleLoad_params_st { + CUmodule *module; + const char *fname; +} cuModuleLoad_params; + +typedef struct cuModuleLoadData_params_st { + CUmodule *module; + const void *image; +} cuModuleLoadData_params; + +typedef struct cuModuleLoadDataEx_params_st { + CUmodule *module; + const void *image; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuModuleLoadDataEx_params; + +typedef struct cuModuleLoadFatBinary_params_st { + CUmodule *module; + const void *fatCubin; +} cuModuleLoadFatBinary_params; + +typedef struct cuModuleUnload_params_st { + CUmodule hmod; +} cuModuleUnload_params; + +typedef struct cuModuleGetLoadingMode_params_st { + CUmoduleLoadingMode *mode; +} cuModuleGetLoadingMode_params; + +typedef struct cuModuleGetFunction_params_st { + CUfunction *hfunc; + CUmodule hmod; + const char *name; +} cuModuleGetFunction_params; + +typedef struct cuModuleGetGlobal_v2_params_st { + CUdeviceptr *dptr; + size_t *bytes; + CUmodule hmod; + const char *name; +} cuModuleGetGlobal_v2_params; + 
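The driver-API meta structs in generated_cuda_meta.h are consumed the same way: subscribe once, enable the CBIDs of interest, and cast functionParams to the struct generated for the intercepted function. A compact end-to-end sketch built on cuModuleGetFunction_params from above (error checking trimmed for brevity; the CBID name is assumed to follow the cupti_driver_cbid.h convention, and the generated header is included directly in case the cupti.h umbrella does not pull it in):

#include <stdio.h>
#include <cuda.h>
#include <cupti.h>
#include "generated_cuda_meta.h"

static void CUPTIAPI onDriverApi(void *userdata, CUpti_CallbackDomain domain,
                                 CUpti_CallbackId cbid,
                                 const CUpti_CallbackData *info)
{
    if (cbid == CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction &&
        info->callbackSite == CUPTI_API_ENTER) {
        /* The generated struct mirrors cuModuleGetFunction's argument list. */
        const cuModuleGetFunction_params *p =
            (const cuModuleGetFunction_params *)info->functionParams;
        printf("cuModuleGetFunction: resolving symbol \"%s\"\n", p->name);
    }
}

int main(void)
{
    CUpti_SubscriberHandle sub;
    cuInit(0);
    /* CUPTI permits one subscriber per process; all domains share it. */
    cuptiSubscribe(&sub, (CUpti_CallbackFunc)onDriverApi, NULL);
    cuptiEnableCallback(1, sub, CUPTI_CB_DOMAIN_DRIVER_API,
                        CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction);
    /* ... load modules and run the application here ... */
    cuptiUnsubscribe(sub);
    return 0;
}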
+typedef struct cuLinkCreate_v2_params_st { + unsigned int numOptions; + CUjit_option *options; + void **optionValues; + CUlinkState *stateOut; +} cuLinkCreate_v2_params; + +typedef struct cuLinkAddData_v2_params_st { + CUlinkState state; + CUjitInputType type; + void *data; + size_t size; + const char *name; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddData_v2_params; + +typedef struct cuLinkAddFile_v2_params_st { + CUlinkState state; + CUjitInputType type; + const char *path; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddFile_v2_params; + +typedef struct cuLinkComplete_params_st { + CUlinkState state; + void **cubinOut; + size_t *sizeOut; +} cuLinkComplete_params; + +typedef struct cuLinkDestroy_params_st { + CUlinkState state; +} cuLinkDestroy_params; + +typedef struct cuModuleGetTexRef_params_st { + CUtexref *pTexRef; + CUmodule hmod; + const char *name; +} cuModuleGetTexRef_params; + +typedef struct cuModuleGetSurfRef_params_st { + CUsurfref *pSurfRef; + CUmodule hmod; + const char *name; +} cuModuleGetSurfRef_params; + +typedef struct cuLibraryLoadData_params_st { + CUlibrary *library; + const void *code; + CUjit_option *jitOptions; + void **jitOptionsValues; + unsigned int numJitOptions; + CUlibraryOption *libraryOptions; + void **libraryOptionValues; + unsigned int numLibraryOptions; +} cuLibraryLoadData_params; + +typedef struct cuLibraryLoadFromFile_params_st { + CUlibrary *library; + const char *fileName; + CUjit_option *jitOptions; + void **jitOptionsValues; + unsigned int numJitOptions; + CUlibraryOption *libraryOptions; + void **libraryOptionValues; + unsigned int numLibraryOptions; +} cuLibraryLoadFromFile_params; + +typedef struct cuLibraryUnload_params_st { + CUlibrary library; +} cuLibraryUnload_params; + +typedef struct cuLibraryGetKernel_params_st { + CUkernel *pKernel; + CUlibrary library; + const char *name; +} cuLibraryGetKernel_params; + +typedef struct cuLibraryGetModule_params_st { + CUmodule *pMod; + CUlibrary library; +} cuLibraryGetModule_params; + +typedef struct cuKernelGetFunction_params_st { + CUfunction *pFunc; + CUkernel kernel; +} cuKernelGetFunction_params; + +typedef struct cuLibraryGetGlobal_params_st { + CUdeviceptr *dptr; + size_t *bytes; + CUlibrary library; + const char *name; +} cuLibraryGetGlobal_params; + +typedef struct cuLibraryGetManaged_params_st { + CUdeviceptr *dptr; + size_t *bytes; + CUlibrary library; + const char *name; +} cuLibraryGetManaged_params; + +typedef struct cuLibraryGetUnifiedFunction_params_st { + void **fptr; + CUlibrary library; + const char *symbol; +} cuLibraryGetUnifiedFunction_params; + +typedef struct cuKernelGetAttribute_params_st { + int *pi; + CUfunction_attribute attrib; + CUkernel kernel; + CUdevice dev; +} cuKernelGetAttribute_params; + +typedef struct cuKernelSetAttribute_params_st { + CUfunction_attribute attrib; + int val; + CUkernel kernel; + CUdevice dev; +} cuKernelSetAttribute_params; + +typedef struct cuKernelSetCacheConfig_params_st { + CUkernel kernel; + CUfunc_cache config; + CUdevice dev; +} cuKernelSetCacheConfig_params; + +typedef struct cuMemGetInfo_v2_params_st { + size_t *free; + size_t *total; +} cuMemGetInfo_v2_params; + +typedef struct cuMemAlloc_v2_params_st { + CUdeviceptr *dptr; + size_t bytesize; +} cuMemAlloc_v2_params; + +typedef struct cuMemAllocPitch_v2_params_st { + CUdeviceptr *dptr; + size_t *pPitch; + size_t WidthInBytes; + size_t Height; + unsigned int ElementSizeBytes; +} 
cuMemAllocPitch_v2_params; + +typedef struct cuMemFree_v2_params_st { + CUdeviceptr dptr; +} cuMemFree_v2_params; + +typedef struct cuMemGetAddressRange_v2_params_st { + CUdeviceptr *pbase; + size_t *psize; + CUdeviceptr dptr; +} cuMemGetAddressRange_v2_params; + +typedef struct cuMemAllocHost_v2_params_st { + void **pp; + size_t bytesize; +} cuMemAllocHost_v2_params; + +typedef struct cuMemFreeHost_params_st { + void *p; +} cuMemFreeHost_params; + +typedef struct cuMemHostAlloc_params_st { + void **pp; + size_t bytesize; + unsigned int Flags; +} cuMemHostAlloc_params; + +typedef struct cuMemHostGetDevicePointer_v2_params_st { + CUdeviceptr *pdptr; + void *p; + unsigned int Flags; +} cuMemHostGetDevicePointer_v2_params; + +typedef struct cuMemHostGetFlags_params_st { + unsigned int *pFlags; + void *p; +} cuMemHostGetFlags_params; + +typedef struct cuMemAllocManaged_params_st { + CUdeviceptr *dptr; + size_t bytesize; + unsigned int flags; +} cuMemAllocManaged_params; + +typedef struct cuDeviceGetByPCIBusId_params_st { + CUdevice *dev; + const char *pciBusId; +} cuDeviceGetByPCIBusId_params; + +typedef struct cuDeviceGetPCIBusId_params_st { + char *pciBusId; + int len; + CUdevice dev; +} cuDeviceGetPCIBusId_params; + +typedef struct cuIpcGetEventHandle_params_st { + CUipcEventHandle *pHandle; + CUevent event; +} cuIpcGetEventHandle_params; + +typedef struct cuIpcOpenEventHandle_params_st { + CUevent *phEvent; + CUipcEventHandle handle; +} cuIpcOpenEventHandle_params; + +typedef struct cuIpcGetMemHandle_params_st { + CUipcMemHandle *pHandle; + CUdeviceptr dptr; +} cuIpcGetMemHandle_params; + +typedef struct cuIpcOpenMemHandle_v2_params_st { + CUdeviceptr *pdptr; + CUipcMemHandle handle; + unsigned int Flags; +} cuIpcOpenMemHandle_v2_params; + +typedef struct cuIpcCloseMemHandle_params_st { + CUdeviceptr dptr; +} cuIpcCloseMemHandle_params; + +typedef struct cuMemHostRegister_v2_params_st { + void *p; + size_t bytesize; + unsigned int Flags; +} cuMemHostRegister_v2_params; + +typedef struct cuMemHostUnregister_params_st { + void *p; +} cuMemHostUnregister_params; + +typedef struct cuMemcpy_ptds_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; +} cuMemcpy_ptds_params; + +typedef struct cuMemcpyPeer_ptds_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; +} cuMemcpyPeer_ptds_params; + +typedef struct cuMemcpyHtoD_v2_ptds_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoD_v2_ptds_params; + +typedef struct cuMemcpyDtoH_v2_ptds_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoH_v2_ptds_params; + +typedef struct cuMemcpyDtoD_v2_ptds_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoD_v2_ptds_params; + +typedef struct cuMemcpyDtoA_v2_ptds_params_st { + CUarray dstArray; + size_t dstOffset; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoA_v2_ptds_params; + +typedef struct cuMemcpyAtoD_v2_ptds_params_st { + CUdeviceptr dstDevice; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoD_v2_ptds_params; + +typedef struct cuMemcpyHtoA_v2_ptds_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoA_v2_ptds_params; + +typedef struct cuMemcpyAtoH_v2_ptds_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoH_v2_ptds_params; + +typedef struct 
cuMemcpyAtoA_v2_ptds_params_st { + CUarray dstArray; + size_t dstOffset; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoA_v2_ptds_params; + +typedef struct cuMemcpy2D_v2_ptds_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2D_v2_ptds_params; + +typedef struct cuMemcpy2DUnaligned_v2_ptds_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2DUnaligned_v2_ptds_params; + +typedef struct cuMemcpy3D_v2_ptds_params_st { + const CUDA_MEMCPY3D *pCopy; +} cuMemcpy3D_v2_ptds_params; + +typedef struct cuMemcpy3DPeer_ptds_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; +} cuMemcpy3DPeer_ptds_params; + +typedef struct cuMemcpyAsync_ptsz_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAsync_ptsz_params; + +typedef struct cuMemcpyPeerAsync_ptsz_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; + CUstream hStream; +} cuMemcpyPeerAsync_ptsz_params; + +typedef struct cuMemcpyHtoDAsync_v2_ptsz_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoDAsync_v2_ptsz_params; + +typedef struct cuMemcpyDtoHAsync_v2_ptsz_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoHAsync_v2_ptsz_params; + +typedef struct cuMemcpyDtoDAsync_v2_ptsz_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoDAsync_v2_ptsz_params; + +typedef struct cuMemcpyHtoAAsync_v2_ptsz_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoAAsync_v2_ptsz_params; + +typedef struct cuMemcpyAtoHAsync_v2_ptsz_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAtoHAsync_v2_ptsz_params; + +typedef struct cuMemcpy2DAsync_v2_ptsz_params_st { + const CUDA_MEMCPY2D *pCopy; + CUstream hStream; +} cuMemcpy2DAsync_v2_ptsz_params; + +typedef struct cuMemcpy3DAsync_v2_ptsz_params_st { + const CUDA_MEMCPY3D *pCopy; + CUstream hStream; +} cuMemcpy3DAsync_v2_ptsz_params; + +typedef struct cuMemcpy3DPeerAsync_ptsz_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; + CUstream hStream; +} cuMemcpy3DPeerAsync_ptsz_params; + +typedef struct cuMemsetD8_v2_ptds_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; +} cuMemsetD8_v2_ptds_params; + +typedef struct cuMemsetD16_v2_ptds_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; +} cuMemsetD16_v2_ptds_params; + +typedef struct cuMemsetD32_v2_ptds_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; +} cuMemsetD32_v2_ptds_params; + +typedef struct cuMemsetD2D8_v2_ptds_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; +} cuMemsetD2D8_v2_ptds_params; + +typedef struct cuMemsetD2D16_v2_ptds_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; +} cuMemsetD2D16_v2_ptds_params; + +typedef struct cuMemsetD2D32_v2_ptds_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; +} cuMemsetD2D32_v2_ptds_params; + +typedef struct cuMemsetD8Async_ptsz_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; + CUstream hStream; +} cuMemsetD8Async_ptsz_params; + +typedef struct cuMemsetD16Async_ptsz_params_st { + CUdeviceptr dstDevice; + 
unsigned short us; + size_t N; + CUstream hStream; +} cuMemsetD16Async_ptsz_params; + +typedef struct cuMemsetD32Async_ptsz_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; + CUstream hStream; +} cuMemsetD32Async_ptsz_params; + +typedef struct cuMemsetD2D8Async_ptsz_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D8Async_ptsz_params; + +typedef struct cuMemsetD2D16Async_ptsz_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D16Async_ptsz_params; + +typedef struct cuMemsetD2D32Async_ptsz_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D32Async_ptsz_params; + +typedef struct cuArrayCreate_v2_params_st { + CUarray *pHandle; + const CUDA_ARRAY_DESCRIPTOR *pAllocateArray; +} cuArrayCreate_v2_params; + +typedef struct cuArrayGetDescriptor_v2_params_st { + CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor; + CUarray hArray; +} cuArrayGetDescriptor_v2_params; + +typedef struct cuArrayGetSparseProperties_params_st { + CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties; + CUarray array; +} cuArrayGetSparseProperties_params; + +typedef struct cuMipmappedArrayGetSparseProperties_params_st { + CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties; + CUmipmappedArray mipmap; +} cuMipmappedArrayGetSparseProperties_params; + +typedef struct cuArrayGetMemoryRequirements_params_st { + CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements; + CUarray array; + CUdevice device; +} cuArrayGetMemoryRequirements_params; + +typedef struct cuMipmappedArrayGetMemoryRequirements_params_st { + CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements; + CUmipmappedArray mipmap; + CUdevice device; +} cuMipmappedArrayGetMemoryRequirements_params; + +typedef struct cuArrayGetPlane_params_st { + CUarray *pPlaneArray; + CUarray hArray; + unsigned int planeIdx; +} cuArrayGetPlane_params; + +typedef struct cuArrayDestroy_params_st { + CUarray hArray; +} cuArrayDestroy_params; + +typedef struct cuArray3DCreate_v2_params_st { + CUarray *pHandle; + const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray; +} cuArray3DCreate_v2_params; + +typedef struct cuArray3DGetDescriptor_v2_params_st { + CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor; + CUarray hArray; +} cuArray3DGetDescriptor_v2_params; + +typedef struct cuMipmappedArrayCreate_params_st { + CUmipmappedArray *pHandle; + const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc; + unsigned int numMipmapLevels; +} cuMipmappedArrayCreate_params; + +typedef struct cuMipmappedArrayGetLevel_params_st { + CUarray *pLevelArray; + CUmipmappedArray hMipmappedArray; + unsigned int level; +} cuMipmappedArrayGetLevel_params; + +typedef struct cuMipmappedArrayDestroy_params_st { + CUmipmappedArray hMipmappedArray; +} cuMipmappedArrayDestroy_params; + +typedef struct cuMemGetHandleForAddressRange_params_st { + void *handle; + CUdeviceptr dptr; + size_t size; + CUmemRangeHandleType handleType; + unsigned long long flags; +} cuMemGetHandleForAddressRange_params; + +typedef struct cuMemAddressReserve_params_st { + CUdeviceptr *ptr; + size_t size; + size_t alignment; + CUdeviceptr addr; + unsigned long long flags; +} cuMemAddressReserve_params; + +typedef struct cuMemAddressFree_params_st { + CUdeviceptr ptr; + size_t size; +} cuMemAddressFree_params; + +typedef struct cuMemCreate_params_st { + CUmemGenericAllocationHandle *handle; + size_t size; + 
const CUmemAllocationProp *prop; + unsigned long long flags; +} cuMemCreate_params; + +typedef struct cuMemRelease_params_st { + CUmemGenericAllocationHandle handle; +} cuMemRelease_params; + +typedef struct cuMemMap_params_st { + CUdeviceptr ptr; + size_t size; + size_t offset; + CUmemGenericAllocationHandle handle; + unsigned long long flags; +} cuMemMap_params; + +typedef struct cuMemMapArrayAsync_ptsz_params_st { + CUarrayMapInfo *mapInfoList; + unsigned int count; + CUstream hStream; +} cuMemMapArrayAsync_ptsz_params; + +typedef struct cuMemUnmap_params_st { + CUdeviceptr ptr; + size_t size; +} cuMemUnmap_params; + +typedef struct cuMemSetAccess_params_st { + CUdeviceptr ptr; + size_t size; + const CUmemAccessDesc *desc; + size_t count; +} cuMemSetAccess_params; + +typedef struct cuMemGetAccess_params_st { + unsigned long long *flags; + const CUmemLocation *location; + CUdeviceptr ptr; +} cuMemGetAccess_params; + +typedef struct cuMemExportToShareableHandle_params_st { + void *shareableHandle; + CUmemGenericAllocationHandle handle; + CUmemAllocationHandleType handleType; + unsigned long long flags; +} cuMemExportToShareableHandle_params; + +typedef struct cuMemImportFromShareableHandle_params_st { + CUmemGenericAllocationHandle *handle; + void *osHandle; + CUmemAllocationHandleType shHandleType; +} cuMemImportFromShareableHandle_params; + +typedef struct cuMemGetAllocationGranularity_params_st { + size_t *granularity; + const CUmemAllocationProp *prop; + CUmemAllocationGranularity_flags option; +} cuMemGetAllocationGranularity_params; + +typedef struct cuMemGetAllocationPropertiesFromHandle_params_st { + CUmemAllocationProp *prop; + CUmemGenericAllocationHandle handle; +} cuMemGetAllocationPropertiesFromHandle_params; + +typedef struct cuMemRetainAllocationHandle_params_st { + CUmemGenericAllocationHandle *handle; + void *addr; +} cuMemRetainAllocationHandle_params; + +typedef struct cuMemFreeAsync_ptsz_params_st { + CUdeviceptr dptr; + CUstream hStream; +} cuMemFreeAsync_ptsz_params; + +typedef struct cuMemAllocAsync_ptsz_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUstream hStream; +} cuMemAllocAsync_ptsz_params; + +typedef struct cuMemPoolTrimTo_params_st { + CUmemoryPool pool; + size_t minBytesToKeep; +} cuMemPoolTrimTo_params; + +typedef struct cuMemPoolSetAttribute_params_st { + CUmemoryPool pool; + CUmemPool_attribute attr; + void *value; +} cuMemPoolSetAttribute_params; + +typedef struct cuMemPoolGetAttribute_params_st { + CUmemoryPool pool; + CUmemPool_attribute attr; + void *value; +} cuMemPoolGetAttribute_params; + +typedef struct cuMemPoolSetAccess_params_st { + CUmemoryPool pool; + const CUmemAccessDesc *map; + size_t count; +} cuMemPoolSetAccess_params; + +typedef struct cuMemPoolGetAccess_params_st { + CUmemAccess_flags *flags; + CUmemoryPool memPool; + CUmemLocation *location; +} cuMemPoolGetAccess_params; + +typedef struct cuMemPoolCreate_params_st { + CUmemoryPool *pool; + const CUmemPoolProps *poolProps; +} cuMemPoolCreate_params; + +typedef struct cuMemPoolDestroy_params_st { + CUmemoryPool pool; +} cuMemPoolDestroy_params; + +typedef struct cuMemAllocFromPoolAsync_ptsz_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUmemoryPool pool; + CUstream hStream; +} cuMemAllocFromPoolAsync_ptsz_params; + +typedef struct cuMemPoolExportToShareableHandle_params_st { + void *handle_out; + CUmemoryPool pool; + CUmemAllocationHandleType handleType; + unsigned long long flags; +} cuMemPoolExportToShareableHandle_params; + +typedef struct 
cuMemPoolImportFromShareableHandle_params_st { + CUmemoryPool *pool_out; + void *handle; + CUmemAllocationHandleType handleType; + unsigned long long flags; +} cuMemPoolImportFromShareableHandle_params; + +typedef struct cuMemPoolExportPointer_params_st { + CUmemPoolPtrExportData *shareData_out; + CUdeviceptr ptr; +} cuMemPoolExportPointer_params; + +typedef struct cuMemPoolImportPointer_params_st { + CUdeviceptr *ptr_out; + CUmemoryPool pool; + CUmemPoolPtrExportData *shareData; +} cuMemPoolImportPointer_params; + +typedef struct cuMulticastCreate_params_st { + CUmemGenericAllocationHandle *mcHandle; + const CUmulticastObjectProp *prop; +} cuMulticastCreate_params; + +typedef struct cuMulticastAddDevice_params_st { + CUmemGenericAllocationHandle mcHandle; + CUdevice dev; +} cuMulticastAddDevice_params; + +typedef struct cuMulticastBindMem_params_st { + CUmemGenericAllocationHandle mcHandle; + size_t mcOffset; + CUmemGenericAllocationHandle memHandle; + size_t memOffset; + size_t size; + unsigned long long flags; +} cuMulticastBindMem_params; + +typedef struct cuMulticastBindAddr_params_st { + CUmemGenericAllocationHandle mcHandle; + size_t mcOffset; + CUdeviceptr memptr; + size_t size; + unsigned long long flags; +} cuMulticastBindAddr_params; + +typedef struct cuMulticastUnbind_params_st { + CUmemGenericAllocationHandle mcHandle; + CUdevice dev; + size_t mcOffset; + size_t size; +} cuMulticastUnbind_params; + +typedef struct cuMulticastGetGranularity_params_st { + size_t *granularity; + const CUmulticastObjectProp *prop; + CUmulticastGranularity_flags option; +} cuMulticastGetGranularity_params; + +typedef struct cuPointerGetAttribute_params_st { + void *data; + CUpointer_attribute attribute; + CUdeviceptr ptr; +} cuPointerGetAttribute_params; + +typedef struct cuMemPrefetchAsync_ptsz_params_st { + CUdeviceptr devPtr; + size_t count; + CUdevice dstDevice; + CUstream hStream; +} cuMemPrefetchAsync_ptsz_params; + +typedef struct cuMemAdvise_params_st { + CUdeviceptr devPtr; + size_t count; + CUmem_advise advice; + CUdevice device; +} cuMemAdvise_params; + +typedef struct cuMemRangeGetAttribute_params_st { + void *data; + size_t dataSize; + CUmem_range_attribute attribute; + CUdeviceptr devPtr; + size_t count; +} cuMemRangeGetAttribute_params; + +typedef struct cuMemRangeGetAttributes_params_st { + void **data; + size_t *dataSizes; + CUmem_range_attribute *attributes; + size_t numAttributes; + CUdeviceptr devPtr; + size_t count; +} cuMemRangeGetAttributes_params; + +typedef struct cuPointerSetAttribute_params_st { + const void *value; + CUpointer_attribute attribute; + CUdeviceptr ptr; +} cuPointerSetAttribute_params; + +typedef struct cuPointerGetAttributes_params_st { + unsigned int numAttributes; + CUpointer_attribute *attributes; + void **data; + CUdeviceptr ptr; +} cuPointerGetAttributes_params; + +typedef struct cuStreamCreate_params_st { + CUstream *phStream; + unsigned int Flags; +} cuStreamCreate_params; + +typedef struct cuStreamCreateWithPriority_params_st { + CUstream *phStream; + unsigned int flags; + int priority; +} cuStreamCreateWithPriority_params; + +typedef struct cuStreamGetPriority_ptsz_params_st { + CUstream hStream; + int *priority; +} cuStreamGetPriority_ptsz_params; + +typedef struct cuStreamGetFlags_ptsz_params_st { + CUstream hStream; + unsigned int *flags; +} cuStreamGetFlags_ptsz_params; + +typedef struct cuStreamGetId_ptsz_params_st { + CUstream hStream; + unsigned long long *streamId; +} cuStreamGetId_ptsz_params; + +typedef struct 
cuStreamGetCtx_ptsz_params_st { + CUstream hStream; + CUcontext *pctx; +} cuStreamGetCtx_ptsz_params; + +typedef struct cuStreamWaitEvent_ptsz_params_st { + CUstream hStream; + CUevent hEvent; + unsigned int Flags; +} cuStreamWaitEvent_ptsz_params; + +typedef struct cuStreamAddCallback_ptsz_params_st { + CUstream hStream; + CUstreamCallback callback; + void *userData; + unsigned int flags; +} cuStreamAddCallback_ptsz_params; + +typedef struct cuStreamBeginCapture_v2_ptsz_params_st { + CUstream hStream; + CUstreamCaptureMode mode; +} cuStreamBeginCapture_v2_ptsz_params; + +typedef struct cuThreadExchangeStreamCaptureMode_params_st { + CUstreamCaptureMode *mode; +} cuThreadExchangeStreamCaptureMode_params; + +typedef struct cuStreamEndCapture_ptsz_params_st { + CUstream hStream; + CUgraph *phGraph; +} cuStreamEndCapture_ptsz_params; + +typedef struct cuStreamIsCapturing_ptsz_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus; +} cuStreamIsCapturing_ptsz_params; + +typedef struct cuStreamGetCaptureInfo_v2_ptsz_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; + CUgraph *graph_out; + const CUgraphNode **dependencies_out; + size_t *numDependencies_out; +} cuStreamGetCaptureInfo_v2_ptsz_params; + +typedef struct cuStreamUpdateCaptureDependencies_ptsz_params_st { + CUstream hStream; + CUgraphNode *dependencies; + size_t numDependencies; + unsigned int flags; +} cuStreamUpdateCaptureDependencies_ptsz_params; + +typedef struct cuStreamAttachMemAsync_ptsz_params_st { + CUstream hStream; + CUdeviceptr dptr; + size_t length; + unsigned int flags; +} cuStreamAttachMemAsync_ptsz_params; + +typedef struct cuStreamQuery_ptsz_params_st { + CUstream hStream; +} cuStreamQuery_ptsz_params; + +typedef struct cuStreamSynchronize_ptsz_params_st { + CUstream hStream; +} cuStreamSynchronize_ptsz_params; + +typedef struct cuStreamDestroy_v2_params_st { + CUstream hStream; +} cuStreamDestroy_v2_params; + +typedef struct cuStreamCopyAttributes_ptsz_params_st { + CUstream dst; + CUstream src; +} cuStreamCopyAttributes_ptsz_params; + +typedef struct cuStreamGetAttribute_ptsz_params_st { + CUstream hStream; + CUstreamAttrID attr; + CUstreamAttrValue *value_out; +} cuStreamGetAttribute_ptsz_params; + +typedef struct cuStreamSetAttribute_ptsz_params_st { + CUstream hStream; + CUstreamAttrID attr; + const CUstreamAttrValue *value; +} cuStreamSetAttribute_ptsz_params; + +typedef struct cuEventCreate_params_st { + CUevent *phEvent; + unsigned int Flags; +} cuEventCreate_params; + +typedef struct cuEventRecord_ptsz_params_st { + CUevent hEvent; + CUstream hStream; +} cuEventRecord_ptsz_params; + +typedef struct cuEventRecordWithFlags_ptsz_params_st { + CUevent hEvent; + CUstream hStream; + unsigned int flags; +} cuEventRecordWithFlags_ptsz_params; + +typedef struct cuEventQuery_params_st { + CUevent hEvent; +} cuEventQuery_params; + +typedef struct cuEventSynchronize_params_st { + CUevent hEvent; +} cuEventSynchronize_params; + +typedef struct cuEventDestroy_v2_params_st { + CUevent hEvent; +} cuEventDestroy_v2_params; + +typedef struct cuEventElapsedTime_params_st { + float *pMilliseconds; + CUevent hStart; + CUevent hEnd; +} cuEventElapsedTime_params; + +typedef struct cuImportExternalMemory_params_st { + CUexternalMemory *extMem_out; + const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc; +} cuImportExternalMemory_params; + +typedef struct cuExternalMemoryGetMappedBuffer_params_st { + CUdeviceptr *devPtr; + CUexternalMemory extMem; + const 
CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc; +} cuExternalMemoryGetMappedBuffer_params; + +typedef struct cuExternalMemoryGetMappedMipmappedArray_params_st { + CUmipmappedArray *mipmap; + CUexternalMemory extMem; + const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc; +} cuExternalMemoryGetMappedMipmappedArray_params; + +typedef struct cuDestroyExternalMemory_params_st { + CUexternalMemory extMem; +} cuDestroyExternalMemory_params; + +typedef struct cuImportExternalSemaphore_params_st { + CUexternalSemaphore *extSem_out; + const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc; +} cuImportExternalSemaphore_params; + +typedef struct cuSignalExternalSemaphoresAsync_ptsz_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuSignalExternalSemaphoresAsync_ptsz_params; + +typedef struct cuWaitExternalSemaphoresAsync_ptsz_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuWaitExternalSemaphoresAsync_ptsz_params; + +typedef struct cuDestroyExternalSemaphore_params_st { + CUexternalSemaphore extSem; +} cuDestroyExternalSemaphore_params; + +typedef struct cuStreamWaitValue32_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_v2_ptsz_params; + +typedef struct cuStreamWaitValue64_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_v2_ptsz_params; + +typedef struct cuStreamWriteValue32_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_v2_ptsz_params; + +typedef struct cuStreamWriteValue64_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_v2_ptsz_params; + +typedef struct cuStreamBatchMemOp_v2_ptsz_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_v2_ptsz_params; + +typedef struct cuFuncGetAttribute_params_st { + int *pi; + CUfunction_attribute attrib; + CUfunction hfunc; +} cuFuncGetAttribute_params; + +typedef struct cuFuncSetAttribute_params_st { + CUfunction hfunc; + CUfunction_attribute attrib; + int value; +} cuFuncSetAttribute_params; + +typedef struct cuFuncSetCacheConfig_params_st { + CUfunction hfunc; + CUfunc_cache config; +} cuFuncSetCacheConfig_params; + +typedef struct cuFuncSetSharedMemConfig_params_st { + CUfunction hfunc; + CUsharedconfig config; +} cuFuncSetSharedMemConfig_params; + +typedef struct cuFuncGetModule_params_st { + CUmodule *hmod; + CUfunction hfunc; +} cuFuncGetModule_params; + +typedef struct cuLaunchKernel_ptsz_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; + void **extra; +} cuLaunchKernel_ptsz_params; + +typedef struct cuLaunchKernelEx_ptsz_params_st { + const CUlaunchConfig *config; + CUfunction f; + void **kernelParams; + void **extra; +} cuLaunchKernelEx_ptsz_params; + +typedef struct cuLaunchCooperativeKernel_ptsz_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int 
blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; +} cuLaunchCooperativeKernel_ptsz_params; + +typedef struct cuLaunchCooperativeKernelMultiDevice_params_st { + CUDA_LAUNCH_PARAMS *launchParamsList; + unsigned int numDevices; + unsigned int flags; +} cuLaunchCooperativeKernelMultiDevice_params; + +typedef struct cuLaunchHostFunc_ptsz_params_st { + CUstream hStream; + CUhostFn fn; + void *userData; +} cuLaunchHostFunc_ptsz_params; + +typedef struct cuFuncSetBlockShape_params_st { + CUfunction hfunc; + int x; + int y; + int z; +} cuFuncSetBlockShape_params; + +typedef struct cuFuncSetSharedSize_params_st { + CUfunction hfunc; + unsigned int bytes; +} cuFuncSetSharedSize_params; + +typedef struct cuParamSetSize_params_st { + CUfunction hfunc; + unsigned int numbytes; +} cuParamSetSize_params; + +typedef struct cuParamSeti_params_st { + CUfunction hfunc; + int offset; + unsigned int value; +} cuParamSeti_params; + +typedef struct cuParamSetf_params_st { + CUfunction hfunc; + int offset; + float value; +} cuParamSetf_params; + +typedef struct cuParamSetv_params_st { + CUfunction hfunc; + int offset; + void *ptr; + unsigned int numbytes; +} cuParamSetv_params; + +typedef struct cuLaunch_params_st { + CUfunction f; +} cuLaunch_params; + +typedef struct cuLaunchGrid_params_st { + CUfunction f; + int grid_width; + int grid_height; +} cuLaunchGrid_params; + +typedef struct cuLaunchGridAsync_params_st { + CUfunction f; + int grid_width; + int grid_height; + CUstream hStream; +} cuLaunchGridAsync_params; + +typedef struct cuParamSetTexRef_params_st { + CUfunction hfunc; + int texunit; + CUtexref hTexRef; +} cuParamSetTexRef_params; + +typedef struct cuGraphCreate_params_st { + CUgraph *phGraph; + unsigned int flags; +} cuGraphCreate_params; + +typedef struct cuGraphAddKernelNode_v2_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphAddKernelNode_v2_params; + +typedef struct cuGraphKernelNodeGetParams_v2_params_st { + CUgraphNode hNode; + CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphKernelNodeGetParams_v2_params; + +typedef struct cuGraphKernelNodeSetParams_v2_params_st { + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphKernelNodeSetParams_v2_params; + +typedef struct cuGraphAddMemcpyNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_MEMCPY3D *copyParams; + CUcontext ctx; +} cuGraphAddMemcpyNode_params; + +typedef struct cuGraphMemcpyNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_MEMCPY3D *nodeParams; +} cuGraphMemcpyNodeGetParams_params; + +typedef struct cuGraphMemcpyNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_MEMCPY3D *nodeParams; +} cuGraphMemcpyNodeSetParams_params; + +typedef struct cuGraphAddMemsetNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_MEMSET_NODE_PARAMS *memsetParams; + CUcontext ctx; +} cuGraphAddMemsetNode_params; + +typedef struct cuGraphMemsetNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_MEMSET_NODE_PARAMS *nodeParams; +} cuGraphMemsetNodeGetParams_params; + +typedef struct cuGraphMemsetNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_MEMSET_NODE_PARAMS *nodeParams; +} cuGraphMemsetNodeSetParams_params; + +typedef struct cuGraphAddHostNode_params_st { + 
CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphAddHostNode_params; + +typedef struct cuGraphHostNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphHostNodeGetParams_params; + +typedef struct cuGraphHostNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphHostNodeSetParams_params; + +typedef struct cuGraphAddChildGraphNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUgraph childGraph; +} cuGraphAddChildGraphNode_params; + +typedef struct cuGraphChildGraphNodeGetGraph_params_st { + CUgraphNode hNode; + CUgraph *phGraph; +} cuGraphChildGraphNodeGetGraph_params; + +typedef struct cuGraphAddEmptyNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; +} cuGraphAddEmptyNode_params; + +typedef struct cuGraphAddEventRecordNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUevent event; +} cuGraphAddEventRecordNode_params; + +typedef struct cuGraphEventRecordNodeGetEvent_params_st { + CUgraphNode hNode; + CUevent *event_out; +} cuGraphEventRecordNodeGetEvent_params; + +typedef struct cuGraphEventRecordNodeSetEvent_params_st { + CUgraphNode hNode; + CUevent event; +} cuGraphEventRecordNodeSetEvent_params; + +typedef struct cuGraphAddEventWaitNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUevent event; +} cuGraphAddEventWaitNode_params; + +typedef struct cuGraphEventWaitNodeGetEvent_params_st { + CUgraphNode hNode; + CUevent *event_out; +} cuGraphEventWaitNodeGetEvent_params; + +typedef struct cuGraphEventWaitNodeSetEvent_params_st { + CUgraphNode hNode; + CUevent event; +} cuGraphEventWaitNodeSetEvent_params; + +typedef struct cuGraphAddExternalSemaphoresSignalNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams; +} cuGraphAddExternalSemaphoresSignalNode_params; + +typedef struct cuGraphExternalSemaphoresSignalNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *params_out; +} cuGraphExternalSemaphoresSignalNodeGetParams_params; + +typedef struct cuGraphExternalSemaphoresSignalNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams; +} cuGraphExternalSemaphoresSignalNodeSetParams_params; + +typedef struct cuGraphAddExternalSemaphoresWaitNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams; +} cuGraphAddExternalSemaphoresWaitNode_params; + +typedef struct cuGraphExternalSemaphoresWaitNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_EXT_SEM_WAIT_NODE_PARAMS *params_out; +} cuGraphExternalSemaphoresWaitNodeGetParams_params; + +typedef struct cuGraphExternalSemaphoresWaitNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams; +} cuGraphExternalSemaphoresWaitNodeSetParams_params; + +typedef struct cuGraphAddBatchMemOpNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const 
CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams; +} cuGraphAddBatchMemOpNode_params; + +typedef struct cuGraphBatchMemOpNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out; +} cuGraphBatchMemOpNodeGetParams_params; + +typedef struct cuGraphBatchMemOpNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams; +} cuGraphBatchMemOpNodeSetParams_params; + +typedef struct cuGraphExecBatchMemOpNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams; +} cuGraphExecBatchMemOpNodeSetParams_params; + +typedef struct cuGraphAddMemAllocNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams; +} cuGraphAddMemAllocNode_params; + +typedef struct cuGraphMemAllocNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_MEM_ALLOC_NODE_PARAMS *params_out; +} cuGraphMemAllocNodeGetParams_params; + +typedef struct cuGraphAddMemFreeNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUdeviceptr dptr; +} cuGraphAddMemFreeNode_params; + +typedef struct cuGraphMemFreeNodeGetParams_params_st { + CUgraphNode hNode; + CUdeviceptr *dptr_out; +} cuGraphMemFreeNodeGetParams_params; + +typedef struct cuDeviceGraphMemTrim_params_st { + CUdevice device; +} cuDeviceGraphMemTrim_params; + +typedef struct cuDeviceGetGraphMemAttribute_params_st { + CUdevice device; + CUgraphMem_attribute attr; + void *value; +} cuDeviceGetGraphMemAttribute_params; + +typedef struct cuDeviceSetGraphMemAttribute_params_st { + CUdevice device; + CUgraphMem_attribute attr; + void *value; +} cuDeviceSetGraphMemAttribute_params; + +typedef struct cuGraphClone_params_st { + CUgraph *phGraphClone; + CUgraph originalGraph; +} cuGraphClone_params; + +typedef struct cuGraphNodeFindInClone_params_st { + CUgraphNode *phNode; + CUgraphNode hOriginalNode; + CUgraph hClonedGraph; +} cuGraphNodeFindInClone_params; + +typedef struct cuGraphNodeGetType_params_st { + CUgraphNode hNode; + CUgraphNodeType *type; +} cuGraphNodeGetType_params; + +typedef struct cuGraphGetNodes_params_st { + CUgraph hGraph; + CUgraphNode *nodes; + size_t *numNodes; +} cuGraphGetNodes_params; + +typedef struct cuGraphGetRootNodes_params_st { + CUgraph hGraph; + CUgraphNode *rootNodes; + size_t *numRootNodes; +} cuGraphGetRootNodes_params; + +typedef struct cuGraphGetEdges_params_st { + CUgraph hGraph; + CUgraphNode *from; + CUgraphNode *to; + size_t *numEdges; +} cuGraphGetEdges_params; + +typedef struct cuGraphNodeGetDependencies_params_st { + CUgraphNode hNode; + CUgraphNode *dependencies; + size_t *numDependencies; +} cuGraphNodeGetDependencies_params; + +typedef struct cuGraphNodeGetDependentNodes_params_st { + CUgraphNode hNode; + CUgraphNode *dependentNodes; + size_t *numDependentNodes; +} cuGraphNodeGetDependentNodes_params; + +typedef struct cuGraphAddDependencies_params_st { + CUgraph hGraph; + const CUgraphNode *from; + const CUgraphNode *to; + size_t numDependencies; +} cuGraphAddDependencies_params; + +typedef struct cuGraphRemoveDependencies_params_st { + CUgraph hGraph; + const CUgraphNode *from; + const CUgraphNode *to; + size_t numDependencies; +} cuGraphRemoveDependencies_params; + +typedef struct cuGraphDestroyNode_params_st { + CUgraphNode hNode; +} cuGraphDestroyNode_params; + +typedef struct cuGraphInstantiateWithFlags_params_st { + CUgraphExec 
*phGraphExec; + CUgraph hGraph; + unsigned long long flags; +} cuGraphInstantiateWithFlags_params; + +typedef struct cuGraphInstantiateWithParams_ptsz_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams; +} cuGraphInstantiateWithParams_ptsz_params; + +typedef struct cuGraphExecGetFlags_params_st { + CUgraphExec hGraphExec; + cuuint64_t *flags; +} cuGraphExecGetFlags_params; + +typedef struct cuGraphExecKernelNodeSetParams_v2_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphExecKernelNodeSetParams_v2_params; + +typedef struct cuGraphExecMemcpyNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_MEMCPY3D *copyParams; + CUcontext ctx; +} cuGraphExecMemcpyNodeSetParams_params; + +typedef struct cuGraphExecMemsetNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_MEMSET_NODE_PARAMS *memsetParams; + CUcontext ctx; +} cuGraphExecMemsetNodeSetParams_params; + +typedef struct cuGraphExecHostNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphExecHostNodeSetParams_params; + +typedef struct cuGraphExecChildGraphNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUgraph childGraph; +} cuGraphExecChildGraphNodeSetParams_params; + +typedef struct cuGraphExecEventRecordNodeSetEvent_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUevent event; +} cuGraphExecEventRecordNodeSetEvent_params; + +typedef struct cuGraphExecEventWaitNodeSetEvent_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUevent event; +} cuGraphExecEventWaitNodeSetEvent_params; + +typedef struct cuGraphExecExternalSemaphoresSignalNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams; +} cuGraphExecExternalSemaphoresSignalNodeSetParams_params; + +typedef struct cuGraphExecExternalSemaphoresWaitNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams; +} cuGraphExecExternalSemaphoresWaitNodeSetParams_params; + +typedef struct cuGraphNodeSetEnabled_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + unsigned int isEnabled; +} cuGraphNodeSetEnabled_params; + +typedef struct cuGraphNodeGetEnabled_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + unsigned int *isEnabled; +} cuGraphNodeGetEnabled_params; + +typedef struct cuGraphUpload_ptsz_params_st { + CUgraphExec hGraphExec; + CUstream hStream; +} cuGraphUpload_ptsz_params; + +typedef struct cuGraphLaunch_ptsz_params_st { + CUgraphExec hGraphExec; + CUstream hStream; +} cuGraphLaunch_ptsz_params; + +typedef struct cuGraphExecDestroy_params_st { + CUgraphExec hGraphExec; +} cuGraphExecDestroy_params; + +typedef struct cuGraphDestroy_params_st { + CUgraph hGraph; +} cuGraphDestroy_params; + +typedef struct cuGraphExecUpdate_v2_params_st { + CUgraphExec hGraphExec; + CUgraph hGraph; + CUgraphExecUpdateResultInfo *resultInfo; +} cuGraphExecUpdate_v2_params; + +typedef struct cuGraphKernelNodeCopyAttributes_params_st { + CUgraphNode dst; + CUgraphNode src; +} cuGraphKernelNodeCopyAttributes_params; + +typedef struct cuGraphKernelNodeGetAttribute_params_st { + CUgraphNode hNode; + CUkernelNodeAttrID attr; + CUkernelNodeAttrValue *value_out; +} cuGraphKernelNodeGetAttribute_params; + +typedef struct cuGraphKernelNodeSetAttribute_params_st 
{ + CUgraphNode hNode; + CUkernelNodeAttrID attr; + const CUkernelNodeAttrValue *value; +} cuGraphKernelNodeSetAttribute_params; + +typedef struct cuGraphDebugDotPrint_params_st { + CUgraph hGraph; + const char *path; + unsigned int flags; +} cuGraphDebugDotPrint_params; + +typedef struct cuUserObjectCreate_params_st { + CUuserObject *object_out; + void *ptr; + CUhostFn destroy; + unsigned int initialRefcount; + unsigned int flags; +} cuUserObjectCreate_params; + +typedef struct cuUserObjectRetain_params_st { + CUuserObject object; + unsigned int count; +} cuUserObjectRetain_params; + +typedef struct cuUserObjectRelease_params_st { + CUuserObject object; + unsigned int count; +} cuUserObjectRelease_params; + +typedef struct cuGraphRetainUserObject_params_st { + CUgraph graph; + CUuserObject object; + unsigned int count; + unsigned int flags; +} cuGraphRetainUserObject_params; + +typedef struct cuGraphReleaseUserObject_params_st { + CUgraph graph; + CUuserObject object; + unsigned int count; +} cuGraphReleaseUserObject_params; + +typedef struct cuOccupancyMaxActiveBlocksPerMultiprocessor_params_st { + int *numBlocks; + CUfunction func; + int blockSize; + size_t dynamicSMemSize; +} cuOccupancyMaxActiveBlocksPerMultiprocessor_params; + +typedef struct cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params_st { + int *numBlocks; + CUfunction func; + int blockSize; + size_t dynamicSMemSize; + unsigned int flags; +} cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params; + +typedef struct cuOccupancyMaxPotentialBlockSize_params_st { + int *minGridSize; + int *blockSize; + CUfunction func; + CUoccupancyB2DSize blockSizeToDynamicSMemSize; + size_t dynamicSMemSize; + int blockSizeLimit; +} cuOccupancyMaxPotentialBlockSize_params; + +typedef struct cuOccupancyMaxPotentialBlockSizeWithFlags_params_st { + int *minGridSize; + int *blockSize; + CUfunction func; + CUoccupancyB2DSize blockSizeToDynamicSMemSize; + size_t dynamicSMemSize; + int blockSizeLimit; + unsigned int flags; +} cuOccupancyMaxPotentialBlockSizeWithFlags_params; + +typedef struct cuOccupancyAvailableDynamicSMemPerBlock_params_st { + size_t *dynamicSmemSize; + CUfunction func; + int numBlocks; + int blockSize; +} cuOccupancyAvailableDynamicSMemPerBlock_params; + +typedef struct cuOccupancyMaxPotentialClusterSize_params_st { + int *clusterSize; + CUfunction func; + const CUlaunchConfig *config; +} cuOccupancyMaxPotentialClusterSize_params; + +typedef struct cuOccupancyMaxActiveClusters_params_st { + int *numClusters; + CUfunction func; + const CUlaunchConfig *config; +} cuOccupancyMaxActiveClusters_params; + +typedef struct cuTexRefSetArray_params_st { + CUtexref hTexRef; + CUarray hArray; + unsigned int Flags; +} cuTexRefSetArray_params; + +typedef struct cuTexRefSetMipmappedArray_params_st { + CUtexref hTexRef; + CUmipmappedArray hMipmappedArray; + unsigned int Flags; +} cuTexRefSetMipmappedArray_params; + +typedef struct cuTexRefSetAddress_v2_params_st { + size_t *ByteOffset; + CUtexref hTexRef; + CUdeviceptr dptr; + size_t bytes; +} cuTexRefSetAddress_v2_params; + +typedef struct cuTexRefSetAddress2D_v3_params_st { + CUtexref hTexRef; + const CUDA_ARRAY_DESCRIPTOR *desc; + CUdeviceptr dptr; + size_t Pitch; +} cuTexRefSetAddress2D_v3_params; + +typedef struct cuTexRefSetFormat_params_st { + CUtexref hTexRef; + CUarray_format fmt; + int NumPackedComponents; +} cuTexRefSetFormat_params; + +typedef struct cuTexRefSetAddressMode_params_st { + CUtexref hTexRef; + int dim; + CUaddress_mode am; +} cuTexRefSetAddressMode_params; + 
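[Editorial aside, not part of the generated header.] These `*_params` structs mirror the argument lists of the corresponding driver API entry points so that CUPTI callback clients can inspect arguments generically: inside a callback, the opaque `CUpti_CallbackData::functionParams` pointer refers to the `*_params` struct matching the intercepted function. Below is a minimal sketch of that pattern, assuming the standard CUPTI callback API (`cuptiSubscribe`, `cuptiEnableDomain`, `CUpti_CallbackData`) and using the `cuLaunchKernel_params` struct defined further down in this header; the handler name `onDriverApi` and the `main` scaffold are illustrative only, and error handling is elided.

#include <cupti.h>
#include <stdio.h>
#include "generated_cuda_meta.h"   /* this header, for the *_params structs */

/* Print launch geometry for every cuLaunchKernel at API entry. */
static void CUPTIAPI onDriverApi(void *userdata, CUpti_CallbackDomain domain,
                                 CUpti_CallbackId cbid, const void *cbdata) {
    const CUpti_CallbackData *info = (const CUpti_CallbackData *)cbdata;
    if (domain == CUPTI_CB_DOMAIN_DRIVER_API &&
        cbid == CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel &&
        info->callbackSite == CUPTI_API_ENTER) {
        /* functionParams points at the generated cuLaunchKernel_params. */
        const cuLaunchKernel_params *p =
            (const cuLaunchKernel_params *)info->functionParams;
        printf("%s grid=(%u,%u,%u) block=(%u,%u,%u) smem=%u\n",
               info->functionName,
               p->gridDimX, p->gridDimY, p->gridDimZ,
               p->blockDimX, p->blockDimY, p->blockDimZ,
               p->sharedMemBytes);
    }
}

int main(void) {
    CUpti_SubscriberHandle sub;
    /* Subscribe and enable all driver-API callbacks (error checks elided). */
    cuptiSubscribe(&sub, (CUpti_CallbackFunc)onDriverApi, NULL);
    cuptiEnableDomain(1, sub, CUPTI_CB_DOMAIN_DRIVER_API);
    /* ... launch kernels via the driver API here ... */
    cuptiUnsubscribe(sub);
    return 0;
}

The same cast-by-callback-id pattern applies to every struct in this header.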
+typedef struct cuTexRefSetFilterMode_params_st { + CUtexref hTexRef; + CUfilter_mode fm; +} cuTexRefSetFilterMode_params; + +typedef struct cuTexRefSetMipmapFilterMode_params_st { + CUtexref hTexRef; + CUfilter_mode fm; +} cuTexRefSetMipmapFilterMode_params; + +typedef struct cuTexRefSetMipmapLevelBias_params_st { + CUtexref hTexRef; + float bias; +} cuTexRefSetMipmapLevelBias_params; + +typedef struct cuTexRefSetMipmapLevelClamp_params_st { + CUtexref hTexRef; + float minMipmapLevelClamp; + float maxMipmapLevelClamp; +} cuTexRefSetMipmapLevelClamp_params; + +typedef struct cuTexRefSetMaxAnisotropy_params_st { + CUtexref hTexRef; + unsigned int maxAniso; +} cuTexRefSetMaxAnisotropy_params; + +typedef struct cuTexRefSetBorderColor_params_st { + CUtexref hTexRef; + float *pBorderColor; +} cuTexRefSetBorderColor_params; + +typedef struct cuTexRefSetFlags_params_st { + CUtexref hTexRef; + unsigned int Flags; +} cuTexRefSetFlags_params; + +typedef struct cuTexRefGetAddress_v2_params_st { + CUdeviceptr *pdptr; + CUtexref hTexRef; +} cuTexRefGetAddress_v2_params; + +typedef struct cuTexRefGetArray_params_st { + CUarray *phArray; + CUtexref hTexRef; +} cuTexRefGetArray_params; + +typedef struct cuTexRefGetMipmappedArray_params_st { + CUmipmappedArray *phMipmappedArray; + CUtexref hTexRef; +} cuTexRefGetMipmappedArray_params; + +typedef struct cuTexRefGetAddressMode_params_st { + CUaddress_mode *pam; + CUtexref hTexRef; + int dim; +} cuTexRefGetAddressMode_params; + +typedef struct cuTexRefGetFilterMode_params_st { + CUfilter_mode *pfm; + CUtexref hTexRef; +} cuTexRefGetFilterMode_params; + +typedef struct cuTexRefGetFormat_params_st { + CUarray_format *pFormat; + int *pNumChannels; + CUtexref hTexRef; +} cuTexRefGetFormat_params; + +typedef struct cuTexRefGetMipmapFilterMode_params_st { + CUfilter_mode *pfm; + CUtexref hTexRef; +} cuTexRefGetMipmapFilterMode_params; + +typedef struct cuTexRefGetMipmapLevelBias_params_st { + float *pbias; + CUtexref hTexRef; +} cuTexRefGetMipmapLevelBias_params; + +typedef struct cuTexRefGetMipmapLevelClamp_params_st { + float *pminMipmapLevelClamp; + float *pmaxMipmapLevelClamp; + CUtexref hTexRef; +} cuTexRefGetMipmapLevelClamp_params; + +typedef struct cuTexRefGetMaxAnisotropy_params_st { + int *pmaxAniso; + CUtexref hTexRef; +} cuTexRefGetMaxAnisotropy_params; + +typedef struct cuTexRefGetBorderColor_params_st { + float *pBorderColor; + CUtexref hTexRef; +} cuTexRefGetBorderColor_params; + +typedef struct cuTexRefGetFlags_params_st { + unsigned int *pFlags; + CUtexref hTexRef; +} cuTexRefGetFlags_params; + +typedef struct cuTexRefCreate_params_st { + CUtexref *pTexRef; +} cuTexRefCreate_params; + +typedef struct cuTexRefDestroy_params_st { + CUtexref hTexRef; +} cuTexRefDestroy_params; + +typedef struct cuSurfRefSetArray_params_st { + CUsurfref hSurfRef; + CUarray hArray; + unsigned int Flags; +} cuSurfRefSetArray_params; + +typedef struct cuSurfRefGetArray_params_st { + CUarray *phArray; + CUsurfref hSurfRef; +} cuSurfRefGetArray_params; + +typedef struct cuTexObjectCreate_params_st { + CUtexObject *pTexObject; + const CUDA_RESOURCE_DESC *pResDesc; + const CUDA_TEXTURE_DESC *pTexDesc; + const CUDA_RESOURCE_VIEW_DESC *pResViewDesc; +} cuTexObjectCreate_params; + +typedef struct cuTexObjectDestroy_params_st { + CUtexObject texObject; +} cuTexObjectDestroy_params; + +typedef struct cuTexObjectGetResourceDesc_params_st { + CUDA_RESOURCE_DESC *pResDesc; + CUtexObject texObject; +} cuTexObjectGetResourceDesc_params; + +typedef struct 
cuTexObjectGetTextureDesc_params_st { + CUDA_TEXTURE_DESC *pTexDesc; + CUtexObject texObject; +} cuTexObjectGetTextureDesc_params; + +typedef struct cuTexObjectGetResourceViewDesc_params_st { + CUDA_RESOURCE_VIEW_DESC *pResViewDesc; + CUtexObject texObject; +} cuTexObjectGetResourceViewDesc_params; + +typedef struct cuSurfObjectCreate_params_st { + CUsurfObject *pSurfObject; + const CUDA_RESOURCE_DESC *pResDesc; +} cuSurfObjectCreate_params; + +typedef struct cuSurfObjectDestroy_params_st { + CUsurfObject surfObject; +} cuSurfObjectDestroy_params; + +typedef struct cuSurfObjectGetResourceDesc_params_st { + CUDA_RESOURCE_DESC *pResDesc; + CUsurfObject surfObject; +} cuSurfObjectGetResourceDesc_params; + +typedef struct cuTensorMapEncodeTiled_params_st { + CUtensorMap *tensorMap; + CUtensorMapDataType tensorDataType; + cuuint32_t tensorRank; + void *globalAddress; + const cuuint64_t *globalDim; + const cuuint64_t *globalStrides; + const cuuint32_t *boxDim; + const cuuint32_t *elementStrides; + CUtensorMapInterleave interleave; + CUtensorMapSwizzle swizzle; + CUtensorMapL2promotion l2Promotion; + CUtensorMapFloatOOBfill oobFill; +} cuTensorMapEncodeTiled_params; + +typedef struct cuTensorMapEncodeIm2col_params_st { + CUtensorMap *tensorMap; + CUtensorMapDataType tensorDataType; + cuuint32_t tensorRank; + void *globalAddress; + const cuuint64_t *globalDim; + const cuuint64_t *globalStrides; + const int *pixelBoxLowerCorner; + const int *pixelBoxUpperCorner; + cuuint32_t channelsPerPixel; + cuuint32_t pixelsPerColumn; + const cuuint32_t *elementStrides; + CUtensorMapInterleave interleave; + CUtensorMapSwizzle swizzle; + CUtensorMapL2promotion l2Promotion; + CUtensorMapFloatOOBfill oobFill; +} cuTensorMapEncodeIm2col_params; + +typedef struct cuTensorMapReplaceAddress_params_st { + CUtensorMap *tensorMap; + void *globalAddress; +} cuTensorMapReplaceAddress_params; + +typedef struct cuDeviceCanAccessPeer_params_st { + int *canAccessPeer; + CUdevice dev; + CUdevice peerDev; +} cuDeviceCanAccessPeer_params; + +typedef struct cuCtxEnablePeerAccess_params_st { + CUcontext peerContext; + unsigned int Flags; +} cuCtxEnablePeerAccess_params; + +typedef struct cuCtxDisablePeerAccess_params_st { + CUcontext peerContext; +} cuCtxDisablePeerAccess_params; + +typedef struct cuDeviceGetP2PAttribute_params_st { + int *value; + CUdevice_P2PAttribute attrib; + CUdevice srcDevice; + CUdevice dstDevice; +} cuDeviceGetP2PAttribute_params; + +typedef struct cuGraphicsUnregisterResource_params_st { + CUgraphicsResource resource; +} cuGraphicsUnregisterResource_params; + +typedef struct cuGraphicsSubResourceGetMappedArray_params_st { + CUarray *pArray; + CUgraphicsResource resource; + unsigned int arrayIndex; + unsigned int mipLevel; +} cuGraphicsSubResourceGetMappedArray_params; + +typedef struct cuGraphicsResourceGetMappedMipmappedArray_params_st { + CUmipmappedArray *pMipmappedArray; + CUgraphicsResource resource; +} cuGraphicsResourceGetMappedMipmappedArray_params; + +typedef struct cuGraphicsResourceGetMappedPointer_v2_params_st { + CUdeviceptr *pDevPtr; + size_t *pSize; + CUgraphicsResource resource; +} cuGraphicsResourceGetMappedPointer_v2_params; + +typedef struct cuGraphicsResourceSetMapFlags_v2_params_st { + CUgraphicsResource resource; + unsigned int flags; +} cuGraphicsResourceSetMapFlags_v2_params; + +typedef struct cuGraphicsMapResources_ptsz_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsMapResources_ptsz_params; + +typedef struct 
cuGraphicsUnmapResources_ptsz_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsUnmapResources_ptsz_params; + +typedef struct cuGetProcAddress_v2_params_st { + const char *symbol; + void **pfn; + int cudaVersion; + cuuint64_t flags; + CUdriverProcAddressQueryResult *symbolStatus; +} cuGetProcAddress_v2_params; + +typedef struct cuCoredumpGetAttribute_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpGetAttribute_params; + +typedef struct cuCoredumpGetAttributeGlobal_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpGetAttributeGlobal_params; + +typedef struct cuCoredumpSetAttribute_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpSetAttribute_params; + +typedef struct cuCoredumpSetAttributeGlobal_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpSetAttributeGlobal_params; + +typedef struct cuGetExportTable_params_st { + const void **ppExportTable; + const CUuuid *pExportTableId; +} cuGetExportTable_params; + +typedef struct cuMemHostRegister_params_st { + void *p; + size_t bytesize; + unsigned int Flags; +} cuMemHostRegister_params; + +typedef struct cuGraphicsResourceSetMapFlags_params_st { + CUgraphicsResource resource; + unsigned int flags; +} cuGraphicsResourceSetMapFlags_params; + +typedef struct cuLinkCreate_params_st { + unsigned int numOptions; + CUjit_option *options; + void **optionValues; + CUlinkState *stateOut; +} cuLinkCreate_params; + +typedef struct cuLinkAddData_params_st { + CUlinkState state; + CUjitInputType type; + void *data; + size_t size; + const char *name; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddData_params; + +typedef struct cuLinkAddFile_params_st { + CUlinkState state; + CUjitInputType type; + const char *path; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddFile_params; + +typedef struct cuTexRefSetAddress2D_v2_params_st { + CUtexref hTexRef; + const CUDA_ARRAY_DESCRIPTOR *desc; + CUdeviceptr dptr; + size_t Pitch; +} cuTexRefSetAddress2D_v2_params; + +typedef struct cuDeviceTotalMem_params_st { + unsigned int *bytes; + CUdevice dev; +} cuDeviceTotalMem_params; + +typedef struct cuCtxCreate_params_st { + CUcontext *pctx; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_params; + +typedef struct cuModuleGetGlobal_params_st { + CUdeviceptr_v1 *dptr; + unsigned int *bytes; + CUmodule hmod; + const char *name; +} cuModuleGetGlobal_params; + +typedef struct cuMemGetInfo_params_st { + unsigned int *free; + unsigned int *total; +} cuMemGetInfo_params; + +typedef struct cuMemAlloc_params_st { + CUdeviceptr_v1 *dptr; + unsigned int bytesize; +} cuMemAlloc_params; + +typedef struct cuMemAllocPitch_params_st { + CUdeviceptr_v1 *dptr; + unsigned int *pPitch; + unsigned int WidthInBytes; + unsigned int Height; + unsigned int ElementSizeBytes; +} cuMemAllocPitch_params; + +typedef struct cuMemFree_params_st { + CUdeviceptr_v1 dptr; +} cuMemFree_params; + +typedef struct cuMemGetAddressRange_params_st { + CUdeviceptr_v1 *pbase; + unsigned int *psize; + CUdeviceptr_v1 dptr; +} cuMemGetAddressRange_params; + +typedef struct cuMemAllocHost_params_st { + void **pp; + unsigned int bytesize; +} cuMemAllocHost_params; + +typedef struct cuMemHostGetDevicePointer_params_st { + CUdeviceptr_v1 *pdptr; + void *p; + unsigned int Flags; +} cuMemHostGetDevicePointer_params; + +typedef struct cuMemcpyHtoD_params_st 
{ + CUdeviceptr_v1 dstDevice; + const void *srcHost; + unsigned int ByteCount; +} cuMemcpyHtoD_params; + +typedef struct cuMemcpyDtoH_params_st { + void *dstHost; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; +} cuMemcpyDtoH_params; + +typedef struct cuMemcpyDtoD_params_st { + CUdeviceptr_v1 dstDevice; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; +} cuMemcpyDtoD_params; + +typedef struct cuMemcpyDtoA_params_st { + CUarray dstArray; + unsigned int dstOffset; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; +} cuMemcpyDtoA_params; + +typedef struct cuMemcpyAtoD_params_st { + CUdeviceptr_v1 dstDevice; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; +} cuMemcpyAtoD_params; + +typedef struct cuMemcpyHtoA_params_st { + CUarray dstArray; + unsigned int dstOffset; + const void *srcHost; + unsigned int ByteCount; +} cuMemcpyHtoA_params; + +typedef struct cuMemcpyAtoH_params_st { + void *dstHost; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; +} cuMemcpyAtoH_params; + +typedef struct cuMemcpyAtoA_params_st { + CUarray dstArray; + unsigned int dstOffset; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; +} cuMemcpyAtoA_params; + +typedef struct cuMemcpyHtoAAsync_params_st { + CUarray dstArray; + unsigned int dstOffset; + const void *srcHost; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyHtoAAsync_params; + +typedef struct cuMemcpyAtoHAsync_params_st { + void *dstHost; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyAtoHAsync_params; + +typedef struct cuMemcpy2D_params_st { + const CUDA_MEMCPY2D_v1 *pCopy; +} cuMemcpy2D_params; + +typedef struct cuMemcpy2DUnaligned_params_st { + const CUDA_MEMCPY2D_v1 *pCopy; +} cuMemcpy2DUnaligned_params; + +typedef struct cuMemcpy3D_params_st { + const CUDA_MEMCPY3D_v1 *pCopy; +} cuMemcpy3D_params; + +typedef struct cuMemcpyHtoDAsync_params_st { + CUdeviceptr_v1 dstDevice; + const void *srcHost; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyHtoDAsync_params; + +typedef struct cuMemcpyDtoHAsync_params_st { + void *dstHost; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyDtoHAsync_params; + +typedef struct cuMemcpyDtoDAsync_params_st { + CUdeviceptr_v1 dstDevice; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyDtoDAsync_params; + +typedef struct cuMemcpy2DAsync_params_st { + const CUDA_MEMCPY2D_v1 *pCopy; + CUstream hStream; +} cuMemcpy2DAsync_params; + +typedef struct cuMemcpy3DAsync_params_st { + const CUDA_MEMCPY3D_v1 *pCopy; + CUstream hStream; +} cuMemcpy3DAsync_params; + +typedef struct cuMemsetD8_params_st { + CUdeviceptr_v1 dstDevice; + unsigned char uc; + unsigned int N; +} cuMemsetD8_params; + +typedef struct cuMemsetD16_params_st { + CUdeviceptr_v1 dstDevice; + unsigned short us; + unsigned int N; +} cuMemsetD16_params; + +typedef struct cuMemsetD32_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int ui; + unsigned int N; +} cuMemsetD32_params; + +typedef struct cuMemsetD2D8_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int dstPitch; + unsigned char uc; + unsigned int Width; + unsigned int Height; +} cuMemsetD2D8_params; + +typedef struct cuMemsetD2D16_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int dstPitch; + unsigned short us; + unsigned int Width; + unsigned int Height; +} cuMemsetD2D16_params; + +typedef struct cuMemsetD2D32_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int dstPitch; + 
unsigned int ui; + unsigned int Width; + unsigned int Height; +} cuMemsetD2D32_params; + +typedef struct cuArrayCreate_params_st { + CUarray *pHandle; + const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray; +} cuArrayCreate_params; + +typedef struct cuArrayGetDescriptor_params_st { + CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor; + CUarray hArray; +} cuArrayGetDescriptor_params; + +typedef struct cuArray3DCreate_params_st { + CUarray *pHandle; + const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray; +} cuArray3DCreate_params; + +typedef struct cuArray3DGetDescriptor_params_st { + CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor; + CUarray hArray; +} cuArray3DGetDescriptor_params; + +typedef struct cuTexRefSetAddress_params_st { + unsigned int *ByteOffset; + CUtexref hTexRef; + CUdeviceptr_v1 dptr; + unsigned int bytes; +} cuTexRefSetAddress_params; + +typedef struct cuTexRefSetAddress2D_params_st { + CUtexref hTexRef; + const CUDA_ARRAY_DESCRIPTOR_v1 *desc; + CUdeviceptr_v1 dptr; + unsigned int Pitch; +} cuTexRefSetAddress2D_params; + +typedef struct cuTexRefGetAddress_params_st { + CUdeviceptr_v1 *pdptr; + CUtexref hTexRef; +} cuTexRefGetAddress_params; + +typedef struct cuGraphicsResourceGetMappedPointer_params_st { + CUdeviceptr_v1 *pDevPtr; + unsigned int *pSize; + CUgraphicsResource resource; +} cuGraphicsResourceGetMappedPointer_params; + +typedef struct cuCtxDestroy_params_st { + CUcontext ctx; +} cuCtxDestroy_params; + +typedef struct cuCtxPopCurrent_params_st { + CUcontext *pctx; +} cuCtxPopCurrent_params; + +typedef struct cuCtxPushCurrent_params_st { + CUcontext ctx; +} cuCtxPushCurrent_params; + +typedef struct cuStreamDestroy_params_st { + CUstream hStream; +} cuStreamDestroy_params; + +typedef struct cuEventDestroy_params_st { + CUevent hEvent; +} cuEventDestroy_params; + +typedef struct cuDevicePrimaryCtxRelease_params_st { + CUdevice dev; +} cuDevicePrimaryCtxRelease_params; + +typedef struct cuDevicePrimaryCtxReset_params_st { + CUdevice dev; +} cuDevicePrimaryCtxReset_params; + +typedef struct cuDevicePrimaryCtxSetFlags_params_st { + CUdevice dev; + unsigned int flags; +} cuDevicePrimaryCtxSetFlags_params; + +typedef struct cuMemcpyHtoD_v2_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoD_v2_params; + +typedef struct cuMemcpyDtoH_v2_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoH_v2_params; + +typedef struct cuMemcpyDtoD_v2_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoD_v2_params; + +typedef struct cuMemcpyDtoA_v2_params_st { + CUarray dstArray; + size_t dstOffset; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoA_v2_params; + +typedef struct cuMemcpyAtoD_v2_params_st { + CUdeviceptr dstDevice; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoD_v2_params; + +typedef struct cuMemcpyHtoA_v2_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoA_v2_params; + +typedef struct cuMemcpyAtoH_v2_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoH_v2_params; + +typedef struct cuMemcpyAtoA_v2_params_st { + CUarray dstArray; + size_t dstOffset; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoA_v2_params; + +typedef struct cuMemcpyHtoAAsync_v2_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoAAsync_v2_params; + +typedef 
struct cuMemcpyAtoHAsync_v2_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAtoHAsync_v2_params; + +typedef struct cuMemcpy2D_v2_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2D_v2_params; + +typedef struct cuMemcpy2DUnaligned_v2_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2DUnaligned_v2_params; + +typedef struct cuMemcpy3D_v2_params_st { + const CUDA_MEMCPY3D *pCopy; +} cuMemcpy3D_v2_params; + +typedef struct cuMemcpyHtoDAsync_v2_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoDAsync_v2_params; + +typedef struct cuMemcpyDtoHAsync_v2_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoHAsync_v2_params; + +typedef struct cuMemcpyDtoDAsync_v2_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoDAsync_v2_params; + +typedef struct cuMemcpy2DAsync_v2_params_st { + const CUDA_MEMCPY2D *pCopy; + CUstream hStream; +} cuMemcpy2DAsync_v2_params; + +typedef struct cuMemcpy3DAsync_v2_params_st { + const CUDA_MEMCPY3D *pCopy; + CUstream hStream; +} cuMemcpy3DAsync_v2_params; + +typedef struct cuMemsetD8_v2_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; +} cuMemsetD8_v2_params; + +typedef struct cuMemsetD16_v2_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; +} cuMemsetD16_v2_params; + +typedef struct cuMemsetD32_v2_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; +} cuMemsetD32_v2_params; + +typedef struct cuMemsetD2D8_v2_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; +} cuMemsetD2D8_v2_params; + +typedef struct cuMemsetD2D16_v2_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; +} cuMemsetD2D16_v2_params; + +typedef struct cuMemsetD2D32_v2_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; +} cuMemsetD2D32_v2_params; + +typedef struct cuMemcpy_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; +} cuMemcpy_params; + +typedef struct cuMemcpyAsync_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAsync_params; + +typedef struct cuMemcpyPeer_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; +} cuMemcpyPeer_params; + +typedef struct cuMemcpyPeerAsync_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; + CUstream hStream; +} cuMemcpyPeerAsync_params; + +typedef struct cuMemcpy3DPeer_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; +} cuMemcpy3DPeer_params; + +typedef struct cuMemcpy3DPeerAsync_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; + CUstream hStream; +} cuMemcpy3DPeerAsync_params; + +typedef struct cuMemsetD8Async_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; + CUstream hStream; +} cuMemsetD8Async_params; + +typedef struct cuMemsetD16Async_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; + CUstream hStream; +} cuMemsetD16Async_params; + +typedef struct cuMemsetD32Async_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; + CUstream hStream; +} cuMemsetD32Async_params; + +typedef struct cuMemsetD2D8Async_params_st { 
+ CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D8Async_params; + +typedef struct cuMemsetD2D16Async_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D16Async_params; + +typedef struct cuMemsetD2D32Async_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D32Async_params; + +typedef struct cuStreamGetPriority_params_st { + CUstream hStream; + int *priority; +} cuStreamGetPriority_params; + +typedef struct cuStreamGetId_params_st { + CUstream hStream; + unsigned long long *streamId; +} cuStreamGetId_params; + +typedef struct cuStreamGetFlags_params_st { + CUstream hStream; + unsigned int *flags; +} cuStreamGetFlags_params; + +typedef struct cuStreamGetCtx_params_st { + CUstream hStream; + CUcontext *pctx; +} cuStreamGetCtx_params; + +typedef struct cuStreamWaitEvent_params_st { + CUstream hStream; + CUevent hEvent; + unsigned int Flags; +} cuStreamWaitEvent_params; + +typedef struct cuStreamAddCallback_params_st { + CUstream hStream; + CUstreamCallback callback; + void *userData; + unsigned int flags; +} cuStreamAddCallback_params; + +typedef struct cuStreamAttachMemAsync_params_st { + CUstream hStream; + CUdeviceptr dptr; + size_t length; + unsigned int flags; +} cuStreamAttachMemAsync_params; + +typedef struct cuStreamQuery_params_st { + CUstream hStream; +} cuStreamQuery_params; + +typedef struct cuStreamSynchronize_params_st { + CUstream hStream; +} cuStreamSynchronize_params; + +typedef struct cuEventRecord_params_st { + CUevent hEvent; + CUstream hStream; +} cuEventRecord_params; + +typedef struct cuEventRecordWithFlags_params_st { + CUevent hEvent; + CUstream hStream; + unsigned int flags; +} cuEventRecordWithFlags_params; + +typedef struct cuLaunchKernel_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; + void **extra; +} cuLaunchKernel_params; + +typedef struct cuLaunchKernelEx_params_st { + const CUlaunchConfig *config; + CUfunction f; + void **kernelParams; + void **extra; +} cuLaunchKernelEx_params; + +typedef struct cuLaunchHostFunc_params_st { + CUstream hStream; + CUhostFn fn; + void *userData; +} cuLaunchHostFunc_params; + +typedef struct cuGraphicsMapResources_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsMapResources_params; + +typedef struct cuGraphicsUnmapResources_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsUnmapResources_params; + +typedef struct cuStreamWriteValue32_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_params; + +typedef struct cuStreamWaitValue32_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_params; + +typedef struct cuStreamWriteValue64_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_params; + +typedef struct cuStreamWaitValue64_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_params; + +typedef struct 
cuStreamBatchMemOp_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_params; + +typedef struct cuStreamWriteValue32_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_ptsz_params; + +typedef struct cuStreamWaitValue32_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_ptsz_params; + +typedef struct cuStreamWriteValue64_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_ptsz_params; + +typedef struct cuStreamWaitValue64_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_ptsz_params; + +typedef struct cuStreamBatchMemOp_ptsz_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_ptsz_params; + +typedef struct cuStreamWriteValue32_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_v2_params; + +typedef struct cuStreamWaitValue32_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_v2_params; + +typedef struct cuStreamWriteValue64_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_v2_params; + +typedef struct cuStreamWaitValue64_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_v2_params; + +typedef struct cuStreamBatchMemOp_v2_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_v2_params; + +typedef struct cuMemPrefetchAsync_params_st { + CUdeviceptr devPtr; + size_t count; + CUdevice dstDevice; + CUstream hStream; +} cuMemPrefetchAsync_params; + +typedef struct cuLaunchCooperativeKernel_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; +} cuLaunchCooperativeKernel_params; + +typedef struct cuSignalExternalSemaphoresAsync_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuSignalExternalSemaphoresAsync_params; + +typedef struct cuWaitExternalSemaphoresAsync_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuWaitExternalSemaphoresAsync_params; + +typedef struct cuStreamBeginCapture_params_st { + CUstream hStream; +} cuStreamBeginCapture_params; + +typedef struct cuStreamBeginCapture_ptsz_params_st { + CUstream hStream; +} cuStreamBeginCapture_ptsz_params; + +typedef struct cuStreamBeginCapture_v2_params_st { + CUstream hStream; + CUstreamCaptureMode mode; +} cuStreamBeginCapture_v2_params; + +typedef struct cuStreamEndCapture_params_st { + CUstream hStream; + CUgraph *phGraph; +} cuStreamEndCapture_params; + +typedef struct cuStreamIsCapturing_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus; +} cuStreamIsCapturing_params; + +typedef struct 
cuStreamGetCaptureInfo_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; +} cuStreamGetCaptureInfo_params; + +typedef struct cuStreamGetCaptureInfo_ptsz_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; +} cuStreamGetCaptureInfo_ptsz_params; + +typedef struct cuStreamGetCaptureInfo_v2_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; + CUgraph *graph_out; + const CUgraphNode **dependencies_out; + size_t *numDependencies_out; +} cuStreamGetCaptureInfo_v2_params; + +typedef struct cuGraphAddKernelNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphAddKernelNode_params; + +typedef struct cuGraphKernelNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphKernelNodeGetParams_params; + +typedef struct cuGraphKernelNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphKernelNodeSetParams_params; + +typedef struct cuGraphExecKernelNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphExecKernelNodeSetParams_params; + +typedef struct cuGraphInstantiateWithParams_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams; +} cuGraphInstantiateWithParams_params; + +typedef struct cuGraphExecUpdate_params_st { + CUgraphExec hGraphExec; + CUgraph hGraph; + CUgraphNode *hErrorNode_out; + CUgraphExecUpdateResult *updateResult_out; +} cuGraphExecUpdate_params; + +typedef struct cuGraphUpload_params_st { + CUgraphExec hGraph; + CUstream hStream; +} cuGraphUpload_params; + +typedef struct cuGraphLaunch_params_st { + CUgraphExec hGraph; + CUstream hStream; +} cuGraphLaunch_params; + +typedef struct cuStreamCopyAttributes_params_st { + CUstream dstStream; + CUstream srcStream; +} cuStreamCopyAttributes_params; + +typedef struct cuStreamGetAttribute_params_st { + CUstream hStream; + CUstreamAttrID attr; + CUstreamAttrValue *value; +} cuStreamGetAttribute_params; + +typedef struct cuStreamSetAttribute_params_st { + CUstream hStream; + CUstreamAttrID attr; + const CUstreamAttrValue *param; +} cuStreamSetAttribute_params; + +typedef struct cuIpcOpenMemHandle_params_st { + CUdeviceptr *pdptr; + CUipcMemHandle handle; + unsigned int Flags; +} cuIpcOpenMemHandle_params; + +typedef struct cuGraphInstantiate_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUgraphNode *phErrorNode; + char *logBuffer; + size_t bufferSize; +} cuGraphInstantiate_params; + +typedef struct cuGraphInstantiate_v2_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUgraphNode *phErrorNode; + char *logBuffer; + size_t bufferSize; +} cuGraphInstantiate_v2_params; + +typedef struct cuMemMapArrayAsync_params_st { + CUarrayMapInfo *mapInfoList; + unsigned int count; + CUstream hStream; +} cuMemMapArrayAsync_params; + +typedef struct cuMemFreeAsync_params_st { + CUdeviceptr dptr; + CUstream hStream; +} cuMemFreeAsync_params; + +typedef struct cuMemAllocAsync_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUstream hStream; +} cuMemAllocAsync_params; + +typedef struct cuMemAllocFromPoolAsync_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUmemoryPool pool; + CUstream hStream; +} cuMemAllocFromPoolAsync_params; + +typedef struct 
cuStreamUpdateCaptureDependencies_params_st { + CUstream hStream; + CUgraphNode *dependencies; + size_t numDependencies; + unsigned int flags; +} cuStreamUpdateCaptureDependencies_params; + +typedef struct cuGetProcAddress_params_st { + const char *symbol; + void **pfn; + int cudaVersion; + cuuint64_t flags; +} cuGetProcAddress_params; diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..79754539e19f2b2f940350f95690c847f405e1a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h @@ -0,0 +1,2126 @@ +// This file is generated. Any changes you make will be lost during the next clean build. + +// CUDA public interface, for type definitions and api function prototypes +#include "cuda_runtime_api.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaDeviceSetLimit_v3020_params_st { + enum cudaLimit limit; + size_t value; +} cudaDeviceSetLimit_v3020_params; + +typedef struct cudaDeviceGetLimit_v3020_params_st { + size_t *pValue; + enum cudaLimit limit; +} cudaDeviceGetLimit_v3020_params; + +typedef struct cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st { + size_t *maxWidthInElements; + const struct cudaChannelFormatDesc *fmtDesc; + int device; +} cudaDeviceGetTexture1DLinearMaxWidth_v11010_params; + +typedef struct cudaDeviceGetCacheConfig_v3020_params_st { + enum cudaFuncCache *pCacheConfig; +} cudaDeviceGetCacheConfig_v3020_params; + +typedef struct cudaDeviceGetStreamPriorityRange_v5050_params_st { + int *leastPriority; + int *greatestPriority; +} cudaDeviceGetStreamPriorityRange_v5050_params; + +typedef struct cudaDeviceSetCacheConfig_v3020_params_st { + enum cudaFuncCache cacheConfig; +} cudaDeviceSetCacheConfig_v3020_params; + +typedef struct cudaDeviceGetSharedMemConfig_v4020_params_st { + enum cudaSharedMemConfig *pConfig; +} cudaDeviceGetSharedMemConfig_v4020_params; + +typedef struct cudaDeviceSetSharedMemConfig_v4020_params_st { + enum cudaSharedMemConfig config; +} cudaDeviceSetSharedMemConfig_v4020_params; + +typedef struct cudaDeviceGetByPCIBusId_v4010_params_st { + int *device; + const char *pciBusId; +} cudaDeviceGetByPCIBusId_v4010_params; + +typedef struct cudaDeviceGetPCIBusId_v4010_params_st { + char *pciBusId; + int len; + int device; +} cudaDeviceGetPCIBusId_v4010_params; + +typedef struct cudaIpcGetEventHandle_v4010_params_st { + cudaIpcEventHandle_t *handle; + cudaEvent_t event; +} cudaIpcGetEventHandle_v4010_params; + +typedef struct cudaIpcOpenEventHandle_v4010_params_st { + cudaEvent_t *event; + cudaIpcEventHandle_t handle; +} cudaIpcOpenEventHandle_v4010_params; + +typedef struct cudaIpcGetMemHandle_v4010_params_st { + cudaIpcMemHandle_t *handle; + void *devPtr; +} cudaIpcGetMemHandle_v4010_params; + +typedef struct cudaIpcOpenMemHandle_v4010_params_st { + void **devPtr; + cudaIpcMemHandle_t handle; + unsigned int flags; +} cudaIpcOpenMemHandle_v4010_params; + +typedef struct cudaIpcCloseMemHandle_v4010_params_st { + void *devPtr; +} cudaIpcCloseMemHandle_v4010_params; + +typedef struct cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st { + enum 
cudaFlushGPUDirectRDMAWritesTarget target; + enum cudaFlushGPUDirectRDMAWritesScope scope; +} cudaDeviceFlushGPUDirectRDMAWrites_v11030_params; + +typedef struct cudaGetErrorName_v6050_params_st { + cudaError_t error; +} cudaGetErrorName_v6050_params; + +typedef struct cudaGetErrorString_v3020_params_st { + cudaError_t error; +} cudaGetErrorString_v3020_params; + +typedef struct cudaGetDeviceCount_v3020_params_st { + int *count; +} cudaGetDeviceCount_v3020_params; + +typedef struct cudaGetDeviceProperties_v2_v12000_params_st { + struct cudaDeviceProp *prop; + int device; +} cudaGetDeviceProperties_v2_v12000_params; + +typedef struct cudaDeviceGetAttribute_v5000_params_st { + int *value; + enum cudaDeviceAttr attr; + int device; +} cudaDeviceGetAttribute_v5000_params; + +typedef struct cudaDeviceGetDefaultMemPool_v11020_params_st { + cudaMemPool_t *memPool; + int device; +} cudaDeviceGetDefaultMemPool_v11020_params; + +typedef struct cudaDeviceSetMemPool_v11020_params_st { + int device; + cudaMemPool_t memPool; +} cudaDeviceSetMemPool_v11020_params; + +typedef struct cudaDeviceGetMemPool_v11020_params_st { + cudaMemPool_t *memPool; + int device; +} cudaDeviceGetMemPool_v11020_params; + +typedef struct cudaDeviceGetNvSciSyncAttributes_v10020_params_st { + void *nvSciSyncAttrList; + int device; + int flags; +} cudaDeviceGetNvSciSyncAttributes_v10020_params; + +typedef struct cudaDeviceGetP2PAttribute_v8000_params_st { + int *value; + enum cudaDeviceP2PAttr attr; + int srcDevice; + int dstDevice; +} cudaDeviceGetP2PAttribute_v8000_params; + +typedef struct cudaChooseDevice_v3020_params_st { + int *device; + const struct cudaDeviceProp *prop; +} cudaChooseDevice_v3020_params; + +typedef struct cudaInitDevice_v12000_params_st { + int device; + unsigned int deviceFlags; + unsigned int flags; +} cudaInitDevice_v12000_params; + +typedef struct cudaSetDevice_v3020_params_st { + int device; +} cudaSetDevice_v3020_params; + +typedef struct cudaGetDevice_v3020_params_st { + int *device; +} cudaGetDevice_v3020_params; + +typedef struct cudaSetValidDevices_v3020_params_st { + int *device_arr; + int len; +} cudaSetValidDevices_v3020_params; + +typedef struct cudaSetDeviceFlags_v3020_params_st { + unsigned int flags; +} cudaSetDeviceFlags_v3020_params; + +typedef struct cudaGetDeviceFlags_v7000_params_st { + unsigned int *flags; +} cudaGetDeviceFlags_v7000_params; + +typedef struct cudaStreamCreate_v3020_params_st { + cudaStream_t *pStream; +} cudaStreamCreate_v3020_params; + +typedef struct cudaStreamCreateWithFlags_v5000_params_st { + cudaStream_t *pStream; + unsigned int flags; +} cudaStreamCreateWithFlags_v5000_params; + +typedef struct cudaStreamCreateWithPriority_v5050_params_st { + cudaStream_t *pStream; + unsigned int flags; + int priority; +} cudaStreamCreateWithPriority_v5050_params; + +typedef struct cudaStreamGetPriority_ptsz_v7000_params_st { + cudaStream_t hStream; + int *priority; +} cudaStreamGetPriority_ptsz_v7000_params; + +typedef struct cudaStreamGetFlags_ptsz_v7000_params_st { + cudaStream_t hStream; + unsigned int *flags; +} cudaStreamGetFlags_ptsz_v7000_params; + +typedef struct cudaStreamGetId_ptsz_v12000_params_st { + cudaStream_t hStream; + unsigned long long *streamId; +} cudaStreamGetId_ptsz_v12000_params; + +typedef struct cudaStreamCopyAttributes_ptsz_v11000_params_st { + cudaStream_t dst; + cudaStream_t src; +} cudaStreamCopyAttributes_ptsz_v11000_params; + +typedef struct cudaStreamGetAttribute_ptsz_v11000_params_st { + cudaStream_t hStream; + cudaStreamAttrID attr; + 
cudaStreamAttrValue *value_out; +} cudaStreamGetAttribute_ptsz_v11000_params; + +typedef struct cudaStreamSetAttribute_ptsz_v11000_params_st { + cudaStream_t hStream; + cudaStreamAttrID attr; + const cudaStreamAttrValue *value; +} cudaStreamSetAttribute_ptsz_v11000_params; + +typedef struct cudaStreamDestroy_v5050_params_st { + cudaStream_t stream; +} cudaStreamDestroy_v5050_params; + +typedef struct cudaStreamWaitEvent_ptsz_v7000_params_st { + cudaStream_t stream; + cudaEvent_t event; + unsigned int flags; +} cudaStreamWaitEvent_ptsz_v7000_params; + +typedef struct cudaStreamAddCallback_ptsz_v7000_params_st { + cudaStream_t stream; + cudaStreamCallback_t callback; + void *userData; + unsigned int flags; +} cudaStreamAddCallback_ptsz_v7000_params; + +typedef struct cudaStreamSynchronize_ptsz_v7000_params_st { + cudaStream_t stream; +} cudaStreamSynchronize_ptsz_v7000_params; + +typedef struct cudaStreamQuery_ptsz_v7000_params_st { + cudaStream_t stream; +} cudaStreamQuery_ptsz_v7000_params; + +typedef struct cudaStreamAttachMemAsync_ptsz_v7000_params_st { + cudaStream_t stream; + void *devPtr; + size_t length; + unsigned int flags; +} cudaStreamAttachMemAsync_ptsz_v7000_params; + +typedef struct cudaStreamBeginCapture_ptsz_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureMode mode; +} cudaStreamBeginCapture_ptsz_v10000_params; + +typedef struct cudaThreadExchangeStreamCaptureMode_v10010_params_st { + enum cudaStreamCaptureMode *mode; +} cudaThreadExchangeStreamCaptureMode_v10010_params; + +typedef struct cudaStreamEndCapture_ptsz_v10000_params_st { + cudaStream_t stream; + cudaGraph_t *pGraph; +} cudaStreamEndCapture_ptsz_v10000_params; + +typedef struct cudaStreamIsCapturing_ptsz_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *pCaptureStatus; +} cudaStreamIsCapturing_ptsz_v10000_params; + +typedef struct cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; + cudaGraph_t *graph_out; + const cudaGraphNode_t **dependencies_out; + size_t *numDependencies_out; +} cudaStreamGetCaptureInfo_v2_ptsz_v11030_params; + +typedef struct cudaStreamUpdateCaptureDependencies_v11030_params_st { + cudaStream_t stream; + cudaGraphNode_t *dependencies; + size_t numDependencies; + unsigned int flags; +} cudaStreamUpdateCaptureDependencies_v11030_params; + +typedef struct cudaEventCreate_v3020_params_st { + cudaEvent_t *event; +} cudaEventCreate_v3020_params; + +typedef struct cudaEventCreateWithFlags_v3020_params_st { + cudaEvent_t *event; + unsigned int flags; +} cudaEventCreateWithFlags_v3020_params; + +typedef struct cudaEventRecord_ptsz_v7000_params_st { + cudaEvent_t event; + cudaStream_t stream; +} cudaEventRecord_ptsz_v7000_params; + +typedef struct cudaEventRecordWithFlags_ptsz_v11010_params_st { + cudaEvent_t event; + cudaStream_t stream; + unsigned int flags; +} cudaEventRecordWithFlags_ptsz_v11010_params; + +typedef struct cudaEventQuery_v3020_params_st { + cudaEvent_t event; +} cudaEventQuery_v3020_params; + +typedef struct cudaEventSynchronize_v3020_params_st { + cudaEvent_t event; +} cudaEventSynchronize_v3020_params; + +typedef struct cudaEventDestroy_v3020_params_st { + cudaEvent_t event; +} cudaEventDestroy_v3020_params; + +typedef struct cudaEventElapsedTime_v3020_params_st { + float *ms; + cudaEvent_t start; + cudaEvent_t end; +} cudaEventElapsedTime_v3020_params; + +typedef struct cudaImportExternalMemory_v10000_params_st { + cudaExternalMemory_t 
*extMem_out; + const struct cudaExternalMemoryHandleDesc *memHandleDesc; +} cudaImportExternalMemory_v10000_params; + +typedef struct cudaExternalMemoryGetMappedBuffer_v10000_params_st { + void **devPtr; + cudaExternalMemory_t extMem; + const struct cudaExternalMemoryBufferDesc *bufferDesc; +} cudaExternalMemoryGetMappedBuffer_v10000_params; + +typedef struct cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st { + cudaMipmappedArray_t *mipmap; + cudaExternalMemory_t extMem; + const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc; +} cudaExternalMemoryGetMappedMipmappedArray_v10000_params; + +typedef struct cudaDestroyExternalMemory_v10000_params_st { + cudaExternalMemory_t extMem; +} cudaDestroyExternalMemory_v10000_params; + +typedef struct cudaImportExternalSemaphore_v10000_params_st { + cudaExternalSemaphore_t *extSem_out; + const struct cudaExternalSemaphoreHandleDesc *semHandleDesc; +} cudaImportExternalSemaphore_v10000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params; + +typedef struct cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params; + +typedef struct cudaDestroyExternalSemaphore_v10000_params_st { + cudaExternalSemaphore_t extSem; +} cudaDestroyExternalSemaphore_v10000_params; + +typedef struct cudaLaunchKernel_ptsz_v7000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchKernel_ptsz_v7000_params; + +typedef struct cudaLaunchKernelExC_ptsz_v11060_params_st { + const cudaLaunchConfig_t *config; + const void *func; + void **args; +} cudaLaunchKernelExC_ptsz_v11060_params; + +typedef struct cudaLaunchCooperativeKernel_ptsz_v9000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchCooperativeKernel_ptsz_v9000_params; + +typedef struct cudaLaunchCooperativeKernelMultiDevice_v9000_params_st { + struct cudaLaunchParams *launchParamsList; + unsigned int numDevices; + unsigned int flags; +} cudaLaunchCooperativeKernelMultiDevice_v9000_params; + +typedef struct cudaFuncSetCacheConfig_v3020_params_st { + const void *func; + enum cudaFuncCache cacheConfig; +} cudaFuncSetCacheConfig_v3020_params; + +typedef struct cudaFuncSetSharedMemConfig_v4020_params_st { + const void *func; + enum cudaSharedMemConfig config; +} cudaFuncSetSharedMemConfig_v4020_params; + +typedef struct cudaFuncGetAttributes_v3020_params_st { + struct cudaFuncAttributes *attr; + const void *func; +} cudaFuncGetAttributes_v3020_params; + +typedef struct cudaFuncSetAttribute_v9000_params_st { + const void *func; + enum cudaFuncAttribute attr; + int value; +} cudaFuncSetAttribute_v9000_params; + +typedef struct cudaLaunchHostFunc_ptsz_v10000_params_st { + cudaStream_t stream; + cudaHostFn_t fn; + void *userData; +} cudaLaunchHostFunc_ptsz_v10000_params; + +typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st { + int *numBlocks; + const void *func; + int blockSize; + size_t dynamicSMemSize; +} cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params; + 
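[Editorial aside, not part of the generated header.] This second generated file, generated_cuda_runtime_api_meta.h, plays the same role for the runtime API, with two naming conventions worth noting: the numeric `_vNNNN` suffix tracks the CUDA release that introduced that signature revision (e.g. `_v3020` for CUDA 3.2, `_v7000` for 7.0), and `_ptds`/`_ptsz` denote the per-thread-default-stream variants of each call. A minimal sketch of consuming one of these structs in the runtime callback domain, assuming the standard CUPTI callback API and the `cudaMalloc_v3020_params` struct defined just below (the handler name `onRuntimeApi` is illustrative); subscription works exactly as in the driver-API sketch above, but enabling `CUPTI_CB_DOMAIN_RUNTIME_API` instead:

#include <cupti.h>
#include <stdio.h>
#include "generated_cuda_runtime_api_meta.h"   /* this header */

/* Log every cudaMalloc at API exit, once *devPtr has been written. */
static void CUPTIAPI onRuntimeApi(void *userdata, CUpti_CallbackDomain domain,
                                  CUpti_CallbackId cbid, const void *cbdata) {
    const CUpti_CallbackData *info = (const CUpti_CallbackData *)cbdata;
    if (domain == CUPTI_CB_DOMAIN_RUNTIME_API &&
        cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 &&
        info->callbackSite == CUPTI_API_EXIT) {
        const cudaMalloc_v3020_params *p =
            (const cudaMalloc_v3020_params *)info->functionParams;
        printf("cudaMalloc(%zu bytes) -> %p\n", p->size, *p->devPtr);
    }
}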
+typedef struct cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st { + size_t *dynamicSmemSize; + const void *func; + int numBlocks; + int blockSize; +} cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params; + +typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st { + int *numBlocks; + const void *func; + int blockSize; + size_t dynamicSMemSize; + unsigned int flags; +} cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params; + +typedef struct cudaOccupancyMaxPotentialClusterSize_v11070_params_st { + int *clusterSize; + const void *func; + const cudaLaunchConfig_t *launchConfig; +} cudaOccupancyMaxPotentialClusterSize_v11070_params; + +typedef struct cudaOccupancyMaxActiveClusters_v11070_params_st { + int *numClusters; + const void *func; + const cudaLaunchConfig_t *launchConfig; +} cudaOccupancyMaxActiveClusters_v11070_params; + +typedef struct cudaMallocManaged_v6000_params_st { + void **devPtr; + size_t size; + unsigned int flags; +} cudaMallocManaged_v6000_params; + +typedef struct cudaMalloc_v3020_params_st { + void **devPtr; + size_t size; +} cudaMalloc_v3020_params; + +typedef struct cudaMallocHost_v3020_params_st { + void **ptr; + size_t size; +} cudaMallocHost_v3020_params; + +typedef struct cudaMallocPitch_v3020_params_st { + void **devPtr; + size_t *pitch; + size_t width; + size_t height; +} cudaMallocPitch_v3020_params; + +typedef struct cudaMallocArray_v3020_params_st { + cudaArray_t *array; + const struct cudaChannelFormatDesc *desc; + size_t width; + size_t height; + unsigned int flags; +} cudaMallocArray_v3020_params; + +typedef struct cudaFree_v3020_params_st { + void *devPtr; +} cudaFree_v3020_params; + +typedef struct cudaFreeHost_v3020_params_st { + void *ptr; +} cudaFreeHost_v3020_params; + +typedef struct cudaFreeArray_v3020_params_st { + cudaArray_t array; +} cudaFreeArray_v3020_params; + +typedef struct cudaFreeMipmappedArray_v5000_params_st { + cudaMipmappedArray_t mipmappedArray; +} cudaFreeMipmappedArray_v5000_params; + +typedef struct cudaHostAlloc_v3020_params_st { + void **pHost; + size_t size; + unsigned int flags; +} cudaHostAlloc_v3020_params; + +typedef struct cudaHostRegister_v4000_params_st { + void *ptr; + size_t size; + unsigned int flags; +} cudaHostRegister_v4000_params; + +typedef struct cudaHostUnregister_v4000_params_st { + void *ptr; +} cudaHostUnregister_v4000_params; + +typedef struct cudaHostGetDevicePointer_v3020_params_st { + void **pDevice; + void *pHost; + unsigned int flags; +} cudaHostGetDevicePointer_v3020_params; + +typedef struct cudaHostGetFlags_v3020_params_st { + unsigned int *pFlags; + void *pHost; +} cudaHostGetFlags_v3020_params; + +typedef struct cudaMalloc3D_v3020_params_st { + struct cudaPitchedPtr *pitchedDevPtr; + struct cudaExtent extent; +} cudaMalloc3D_v3020_params; + +typedef struct cudaMalloc3DArray_v3020_params_st { + cudaArray_t *array; + const struct cudaChannelFormatDesc *desc; + struct cudaExtent extent; + unsigned int flags; +} cudaMalloc3DArray_v3020_params; + +typedef struct cudaMallocMipmappedArray_v5000_params_st { + cudaMipmappedArray_t *mipmappedArray; + const struct cudaChannelFormatDesc *desc; + struct cudaExtent extent; + unsigned int numLevels; + unsigned int flags; +} cudaMallocMipmappedArray_v5000_params; + +typedef struct cudaGetMipmappedArrayLevel_v5000_params_st { + cudaArray_t *levelArray; + cudaMipmappedArray_const_t mipmappedArray; + unsigned int level; +} cudaGetMipmappedArrayLevel_v5000_params; + +typedef struct 
cudaMemcpy3D_ptds_v7000_params_st { + const struct cudaMemcpy3DParms *p; +} cudaMemcpy3D_ptds_v7000_params; + +typedef struct cudaMemcpy3DPeer_ptds_v7000_params_st { + const struct cudaMemcpy3DPeerParms *p; +} cudaMemcpy3DPeer_ptds_v7000_params; + +typedef struct cudaMemcpy3DAsync_ptsz_v7000_params_st { + const struct cudaMemcpy3DParms *p; + cudaStream_t stream; +} cudaMemcpy3DAsync_ptsz_v7000_params; + +typedef struct cudaMemcpy3DPeerAsync_ptsz_v7000_params_st { + const struct cudaMemcpy3DPeerParms *p; + cudaStream_t stream; +} cudaMemcpy3DPeerAsync_ptsz_v7000_params; + +typedef struct cudaMemGetInfo_v3020_params_st { + size_t *free; + size_t *total; +} cudaMemGetInfo_v3020_params; + +typedef struct cudaArrayGetInfo_v4010_params_st { + struct cudaChannelFormatDesc *desc; + struct cudaExtent *extent; + unsigned int *flags; + cudaArray_t array; +} cudaArrayGetInfo_v4010_params; + +typedef struct cudaArrayGetPlane_v11020_params_st { + cudaArray_t *pPlaneArray; + cudaArray_t hArray; + unsigned int planeIdx; +} cudaArrayGetPlane_v11020_params; + +typedef struct cudaArrayGetMemoryRequirements_v11060_params_st { + struct cudaArrayMemoryRequirements *memoryRequirements; + cudaArray_t array; + int device; +} cudaArrayGetMemoryRequirements_v11060_params; + +typedef struct cudaMipmappedArrayGetMemoryRequirements_v11060_params_st { + struct cudaArrayMemoryRequirements *memoryRequirements; + cudaMipmappedArray_t mipmap; + int device; +} cudaMipmappedArrayGetMemoryRequirements_v11060_params; + +typedef struct cudaArrayGetSparseProperties_v11010_params_st { + struct cudaArraySparseProperties *sparseProperties; + cudaArray_t array; +} cudaArrayGetSparseProperties_v11010_params; + +typedef struct cudaMipmappedArrayGetSparseProperties_v11010_params_st { + struct cudaArraySparseProperties *sparseProperties; + cudaMipmappedArray_t mipmap; +} cudaMipmappedArrayGetSparseProperties_v11010_params; + +typedef struct cudaMemcpy_ptds_v7000_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpy_ptds_v7000_params; + +typedef struct cudaMemcpyPeer_v4000_params_st { + void *dst; + int dstDevice; + const void *src; + int srcDevice; + size_t count; +} cudaMemcpyPeer_v4000_params; + +typedef struct cudaMemcpy2D_ptds_v7000_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2D_ptds_v7000_params; + +typedef struct cudaMemcpy2DToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DToArray_ptds_v7000_params; + +typedef struct cudaMemcpy2DFromArray_ptds_v7000_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DFromArray_ptds_v7000_params; + +typedef struct cudaMemcpy2DArrayToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DArrayToArray_ptds_v7000_params; + +typedef struct cudaMemcpyToSymbol_ptds_v7000_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyToSymbol_ptds_v7000_params; + +typedef struct cudaMemcpyFromSymbol_ptds_v7000_params_st { + void *dst; + const 
void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyFromSymbol_ptds_v7000_params; + +typedef struct cudaMemcpyAsync_ptsz_v7000_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyPeerAsync_v4000_params_st { + void *dst; + int dstDevice; + const void *src; + int srcDevice; + size_t count; + cudaStream_t stream; +} cudaMemcpyPeerAsync_v4000_params; + +typedef struct cudaMemcpy2DAsync_ptsz_v7000_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DAsync_ptsz_v7000_params; + +typedef struct cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DToArrayAsync_ptsz_v7000_params; + +typedef struct cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DFromArrayAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyToSymbolAsync_ptsz_v7000_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToSymbolAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromSymbolAsync_ptsz_v7000_params; + +typedef struct cudaMemset_ptds_v7000_params_st { + void *devPtr; + int value; + size_t count; +} cudaMemset_ptds_v7000_params; + +typedef struct cudaMemset2D_ptds_v7000_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; +} cudaMemset2D_ptds_v7000_params; + +typedef struct cudaMemset3D_ptds_v7000_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; +} cudaMemset3D_ptds_v7000_params; + +typedef struct cudaMemsetAsync_ptsz_v7000_params_st { + void *devPtr; + int value; + size_t count; + cudaStream_t stream; +} cudaMemsetAsync_ptsz_v7000_params; + +typedef struct cudaMemset2DAsync_ptsz_v7000_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; + cudaStream_t stream; +} cudaMemset2DAsync_ptsz_v7000_params; + +typedef struct cudaMemset3DAsync_ptsz_v7000_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; + cudaStream_t stream; +} cudaMemset3DAsync_ptsz_v7000_params; + +typedef struct cudaGetSymbolAddress_v3020_params_st { + void **devPtr; + const void *symbol; +} cudaGetSymbolAddress_v3020_params; + +typedef struct cudaGetSymbolSize_v3020_params_st { + size_t *size; + const void *symbol; +} cudaGetSymbolSize_v3020_params; + +typedef struct cudaMemPrefetchAsync_ptsz_v8000_params_st { + const void *devPtr; + size_t count; + int dstDevice; + cudaStream_t stream; +} cudaMemPrefetchAsync_ptsz_v8000_params; + +typedef struct cudaMemAdvise_v8000_params_st { + const void *devPtr; + size_t count; + enum cudaMemoryAdvise advice; + int device; +} cudaMemAdvise_v8000_params; + +typedef struct cudaMemRangeGetAttribute_v8000_params_st { + void *data; + size_t dataSize; + enum 
cudaMemRangeAttribute attribute; + const void *devPtr; + size_t count; +} cudaMemRangeGetAttribute_v8000_params; + +typedef struct cudaMemRangeGetAttributes_v8000_params_st { + void **data; + size_t *dataSizes; + enum cudaMemRangeAttribute *attributes; + size_t numAttributes; + const void *devPtr; + size_t count; +} cudaMemRangeGetAttributes_v8000_params; + +typedef struct cudaMemcpyToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyToArray_ptds_v7000_params; + +typedef struct cudaMemcpyFromArray_ptds_v7000_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyFromArray_ptds_v7000_params; + +typedef struct cudaMemcpyArrayToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyArrayToArray_ptds_v7000_params; + +typedef struct cudaMemcpyToArrayAsync_ptsz_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToArrayAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyFromArrayAsync_ptsz_v7000_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromArrayAsync_ptsz_v7000_params; + +typedef struct cudaMallocAsync_ptsz_v11020_params_st { + void **devPtr; + size_t size; + cudaStream_t hStream; +} cudaMallocAsync_ptsz_v11020_params; + +typedef struct cudaFreeAsync_ptsz_v11020_params_st { + void *devPtr; + cudaStream_t hStream; +} cudaFreeAsync_ptsz_v11020_params; + +typedef struct cudaMemPoolTrimTo_v11020_params_st { + cudaMemPool_t memPool; + size_t minBytesToKeep; +} cudaMemPoolTrimTo_v11020_params; + +typedef struct cudaMemPoolSetAttribute_v11020_params_st { + cudaMemPool_t memPool; + enum cudaMemPoolAttr attr; + void *value; +} cudaMemPoolSetAttribute_v11020_params; + +typedef struct cudaMemPoolGetAttribute_v11020_params_st { + cudaMemPool_t memPool; + enum cudaMemPoolAttr attr; + void *value; +} cudaMemPoolGetAttribute_v11020_params; + +typedef struct cudaMemPoolSetAccess_v11020_params_st { + cudaMemPool_t memPool; + const struct cudaMemAccessDesc *descList; + size_t count; +} cudaMemPoolSetAccess_v11020_params; + +typedef struct cudaMemPoolGetAccess_v11020_params_st { + enum cudaMemAccessFlags *flags; + cudaMemPool_t memPool; + struct cudaMemLocation *location; +} cudaMemPoolGetAccess_v11020_params; + +typedef struct cudaMemPoolCreate_v11020_params_st { + cudaMemPool_t *memPool; + const struct cudaMemPoolProps *poolProps; +} cudaMemPoolCreate_v11020_params; + +typedef struct cudaMemPoolDestroy_v11020_params_st { + cudaMemPool_t memPool; +} cudaMemPoolDestroy_v11020_params; + +typedef struct cudaMallocFromPoolAsync_ptsz_v11020_params_st { + void **ptr; + size_t size; + cudaMemPool_t memPool; + cudaStream_t stream; +} cudaMallocFromPoolAsync_ptsz_v11020_params; + +typedef struct cudaMemPoolExportToShareableHandle_v11020_params_st { + void *shareableHandle; + cudaMemPool_t memPool; + enum cudaMemAllocationHandleType handleType; + unsigned int flags; +} cudaMemPoolExportToShareableHandle_v11020_params; + +typedef struct cudaMemPoolImportFromShareableHandle_v11020_params_st { + cudaMemPool_t *memPool; + void 
*shareableHandle; + enum cudaMemAllocationHandleType handleType; + unsigned int flags; +} cudaMemPoolImportFromShareableHandle_v11020_params; + +typedef struct cudaMemPoolExportPointer_v11020_params_st { + struct cudaMemPoolPtrExportData *exportData; + void *ptr; +} cudaMemPoolExportPointer_v11020_params; + +typedef struct cudaMemPoolImportPointer_v11020_params_st { + void **ptr; + cudaMemPool_t memPool; + struct cudaMemPoolPtrExportData *exportData; +} cudaMemPoolImportPointer_v11020_params; + +typedef struct cudaPointerGetAttributes_v4000_params_st { + struct cudaPointerAttributes *attributes; + const void *ptr; +} cudaPointerGetAttributes_v4000_params; + +typedef struct cudaDeviceCanAccessPeer_v4000_params_st { + int *canAccessPeer; + int device; + int peerDevice; +} cudaDeviceCanAccessPeer_v4000_params; + +typedef struct cudaDeviceEnablePeerAccess_v4000_params_st { + int peerDevice; + unsigned int flags; +} cudaDeviceEnablePeerAccess_v4000_params; + +typedef struct cudaDeviceDisablePeerAccess_v4000_params_st { + int peerDevice; +} cudaDeviceDisablePeerAccess_v4000_params; + +typedef struct cudaGraphicsUnregisterResource_v3020_params_st { + cudaGraphicsResource_t resource; +} cudaGraphicsUnregisterResource_v3020_params; + +typedef struct cudaGraphicsResourceSetMapFlags_v3020_params_st { + cudaGraphicsResource_t resource; + unsigned int flags; +} cudaGraphicsResourceSetMapFlags_v3020_params; + +typedef struct cudaGraphicsMapResources_v3020_params_st { + int count; + cudaGraphicsResource_t *resources; + cudaStream_t stream; +} cudaGraphicsMapResources_v3020_params; + +typedef struct cudaGraphicsUnmapResources_v3020_params_st { + int count; + cudaGraphicsResource_t *resources; + cudaStream_t stream; +} cudaGraphicsUnmapResources_v3020_params; + +typedef struct cudaGraphicsResourceGetMappedPointer_v3020_params_st { + void **devPtr; + size_t *size; + cudaGraphicsResource_t resource; +} cudaGraphicsResourceGetMappedPointer_v3020_params; + +typedef struct cudaGraphicsSubResourceGetMappedArray_v3020_params_st { + cudaArray_t *array; + cudaGraphicsResource_t resource; + unsigned int arrayIndex; + unsigned int mipLevel; +} cudaGraphicsSubResourceGetMappedArray_v3020_params; + +typedef struct cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st { + cudaMipmappedArray_t *mipmappedArray; + cudaGraphicsResource_t resource; +} cudaGraphicsResourceGetMappedMipmappedArray_v5000_params; + +typedef struct cudaGetChannelDesc_v3020_params_st { + struct cudaChannelFormatDesc *desc; + cudaArray_const_t array; +} cudaGetChannelDesc_v3020_params; + +typedef struct cudaCreateChannelDesc_v3020_params_st { + int x; + int y; + int z; + int w; + enum cudaChannelFormatKind f; +} cudaCreateChannelDesc_v3020_params; + +typedef struct cudaCreateTextureObject_v5000_params_st { + cudaTextureObject_t *pTexObject; + const struct cudaResourceDesc *pResDesc; + const struct cudaTextureDesc *pTexDesc; + const struct cudaResourceViewDesc *pResViewDesc; +} cudaCreateTextureObject_v5000_params; + +typedef struct cudaDestroyTextureObject_v5000_params_st { + cudaTextureObject_t texObject; +} cudaDestroyTextureObject_v5000_params; + +typedef struct cudaGetTextureObjectResourceDesc_v5000_params_st { + struct cudaResourceDesc *pResDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectResourceDesc_v5000_params; + +typedef struct cudaGetTextureObjectTextureDesc_v5000_params_st { + struct cudaTextureDesc *pTexDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectTextureDesc_v5000_params; + +typedef struct 
cudaGetTextureObjectResourceViewDesc_v5000_params_st { + struct cudaResourceViewDesc *pResViewDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectResourceViewDesc_v5000_params; + +typedef struct cudaCreateSurfaceObject_v5000_params_st { + cudaSurfaceObject_t *pSurfObject; + const struct cudaResourceDesc *pResDesc; +} cudaCreateSurfaceObject_v5000_params; + +typedef struct cudaDestroySurfaceObject_v5000_params_st { + cudaSurfaceObject_t surfObject; +} cudaDestroySurfaceObject_v5000_params; + +typedef struct cudaGetSurfaceObjectResourceDesc_v5000_params_st { + struct cudaResourceDesc *pResDesc; + cudaSurfaceObject_t surfObject; +} cudaGetSurfaceObjectResourceDesc_v5000_params; + +typedef struct cudaDriverGetVersion_v3020_params_st { + int *driverVersion; +} cudaDriverGetVersion_v3020_params; + +typedef struct cudaRuntimeGetVersion_v3020_params_st { + int *runtimeVersion; +} cudaRuntimeGetVersion_v3020_params; + +typedef struct cudaGraphCreate_v10000_params_st { + cudaGraph_t *pGraph; + unsigned int flags; +} cudaGraphCreate_v10000_params; + +typedef struct cudaGraphAddKernelNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaKernelNodeParams *pNodeParams; +} cudaGraphAddKernelNode_v10000_params; + +typedef struct cudaGraphKernelNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaKernelNodeParams *pNodeParams; +} cudaGraphKernelNodeGetParams_v10000_params; + +typedef struct cudaGraphKernelNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaKernelNodeParams *pNodeParams; +} cudaGraphKernelNodeSetParams_v10000_params; + +typedef struct cudaGraphKernelNodeCopyAttributes_v11000_params_st { + cudaGraphNode_t hSrc; + cudaGraphNode_t hDst; +} cudaGraphKernelNodeCopyAttributes_v11000_params; + +typedef struct cudaGraphKernelNodeGetAttribute_v11000_params_st { + cudaGraphNode_t hNode; + cudaKernelNodeAttrID attr; + cudaKernelNodeAttrValue *value_out; +} cudaGraphKernelNodeGetAttribute_v11000_params; + +typedef struct cudaGraphKernelNodeSetAttribute_v11000_params_st { + cudaGraphNode_t hNode; + cudaKernelNodeAttrID attr; + const cudaKernelNodeAttrValue *value; +} cudaGraphKernelNodeSetAttribute_v11000_params; + +typedef struct cudaGraphAddMemcpyNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaMemcpy3DParms *pCopyParams; +} cudaGraphAddMemcpyNode_v10000_params; + +typedef struct cudaGraphAddMemcpyNodeToSymbol_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphAddMemcpyNodeToSymbol_v11010_params; + +typedef struct cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphAddMemcpyNodeFromSymbol_v11010_params; + +typedef struct cudaGraphAddMemcpyNode1D_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaGraphAddMemcpyNode1D_v11010_params; + +typedef struct 
cudaGraphMemcpyNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaMemcpy3DParms *pNodeParams; +} cudaGraphMemcpyNodeGetParams_v10000_params; + +typedef struct cudaGraphMemcpyNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaMemcpy3DParms *pNodeParams; +} cudaGraphMemcpyNodeSetParams_v10000_params; + +typedef struct cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st { + cudaGraphNode_t node; + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params; + +typedef struct cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st { + cudaGraphNode_t node; + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params; + +typedef struct cudaGraphMemcpyNodeSetParams1D_v11010_params_st { + cudaGraphNode_t node; + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaGraphMemcpyNodeSetParams1D_v11010_params; + +typedef struct cudaGraphAddMemsetNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaMemsetParams *pMemsetParams; +} cudaGraphAddMemsetNode_v10000_params; + +typedef struct cudaGraphMemsetNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaMemsetParams *pNodeParams; +} cudaGraphMemsetNodeGetParams_v10000_params; + +typedef struct cudaGraphMemsetNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaMemsetParams *pNodeParams; +} cudaGraphMemsetNodeSetParams_v10000_params; + +typedef struct cudaGraphAddHostNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaHostNodeParams *pNodeParams; +} cudaGraphAddHostNode_v10000_params; + +typedef struct cudaGraphHostNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaHostNodeParams *pNodeParams; +} cudaGraphHostNodeGetParams_v10000_params; + +typedef struct cudaGraphHostNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaHostNodeParams *pNodeParams; +} cudaGraphHostNodeSetParams_v10000_params; + +typedef struct cudaGraphAddChildGraphNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + cudaGraph_t childGraph; +} cudaGraphAddChildGraphNode_v10000_params; + +typedef struct cudaGraphChildGraphNodeGetGraph_v10000_params_st { + cudaGraphNode_t node; + cudaGraph_t *pGraph; +} cudaGraphChildGraphNodeGetGraph_v10000_params; + +typedef struct cudaGraphAddEmptyNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; +} cudaGraphAddEmptyNode_v10000_params; + +typedef struct cudaGraphAddEventRecordNode_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + cudaEvent_t event; +} cudaGraphAddEventRecordNode_v11010_params; + +typedef struct cudaGraphEventRecordNodeGetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t *event_out; +} cudaGraphEventRecordNodeGetEvent_v11010_params; + +typedef struct cudaGraphEventRecordNodeSetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t event; +} cudaGraphEventRecordNodeSetEvent_v11010_params; + +typedef 
struct cudaGraphAddEventWaitNode_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + cudaEvent_t event; +} cudaGraphAddEventWaitNode_v11010_params; + +typedef struct cudaGraphEventWaitNodeGetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t *event_out; +} cudaGraphEventWaitNodeGetEvent_v11010_params; + +typedef struct cudaGraphEventWaitNodeSetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t event; +} cudaGraphEventWaitNodeSetEvent_v11010_params; + +typedef struct cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaExternalSemaphoreSignalNodeParams *nodeParams; +} cudaGraphAddExternalSemaphoresSignalNode_v11020_params; + +typedef struct cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st { + cudaGraphNode_t hNode; + struct cudaExternalSemaphoreSignalNodeParams *params_out; +} cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params; + +typedef struct cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st { + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreSignalNodeParams *nodeParams; +} cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params; + +typedef struct cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaExternalSemaphoreWaitNodeParams *nodeParams; +} cudaGraphAddExternalSemaphoresWaitNode_v11020_params; + +typedef struct cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st { + cudaGraphNode_t hNode; + struct cudaExternalSemaphoreWaitNodeParams *params_out; +} cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params; + +typedef struct cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st { + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreWaitNodeParams *nodeParams; +} cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params; + +typedef struct cudaGraphAddMemAllocNode_v11040_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + struct cudaMemAllocNodeParams *nodeParams; +} cudaGraphAddMemAllocNode_v11040_params; + +typedef struct cudaGraphMemAllocNodeGetParams_v11040_params_st { + cudaGraphNode_t node; + struct cudaMemAllocNodeParams *params_out; +} cudaGraphMemAllocNodeGetParams_v11040_params; + +typedef struct cudaGraphAddMemFreeNode_v11040_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + void *dptr; +} cudaGraphAddMemFreeNode_v11040_params; + +typedef struct cudaGraphMemFreeNodeGetParams_v11040_params_st { + cudaGraphNode_t node; + void *dptr_out; +} cudaGraphMemFreeNodeGetParams_v11040_params; + +typedef struct cudaDeviceGraphMemTrim_v11040_params_st { + int device; +} cudaDeviceGraphMemTrim_v11040_params; + +typedef struct cudaDeviceGetGraphMemAttribute_v11040_params_st { + int device; + enum cudaGraphMemAttributeType attr; + void *value; +} cudaDeviceGetGraphMemAttribute_v11040_params; + +typedef struct cudaDeviceSetGraphMemAttribute_v11040_params_st { + int device; + enum cudaGraphMemAttributeType attr; + void *value; +} cudaDeviceSetGraphMemAttribute_v11040_params; + +typedef struct cudaGraphClone_v10000_params_st { + cudaGraph_t *pGraphClone; + 
cudaGraph_t originalGraph; +} cudaGraphClone_v10000_params; + +typedef struct cudaGraphNodeFindInClone_v10000_params_st { + cudaGraphNode_t *pNode; + cudaGraphNode_t originalNode; + cudaGraph_t clonedGraph; +} cudaGraphNodeFindInClone_v10000_params; + +typedef struct cudaGraphNodeGetType_v10000_params_st { + cudaGraphNode_t node; + enum cudaGraphNodeType *pType; +} cudaGraphNodeGetType_v10000_params; + +typedef struct cudaGraphGetNodes_v10000_params_st { + cudaGraph_t graph; + cudaGraphNode_t *nodes; + size_t *numNodes; +} cudaGraphGetNodes_v10000_params; + +typedef struct cudaGraphGetRootNodes_v10000_params_st { + cudaGraph_t graph; + cudaGraphNode_t *pRootNodes; + size_t *pNumRootNodes; +} cudaGraphGetRootNodes_v10000_params; + +typedef struct cudaGraphGetEdges_v10000_params_st { + cudaGraph_t graph; + cudaGraphNode_t *from; + cudaGraphNode_t *to; + size_t *numEdges; +} cudaGraphGetEdges_v10000_params; + +typedef struct cudaGraphNodeGetDependencies_v10000_params_st { + cudaGraphNode_t node; + cudaGraphNode_t *pDependencies; + size_t *pNumDependencies; +} cudaGraphNodeGetDependencies_v10000_params; + +typedef struct cudaGraphNodeGetDependentNodes_v10000_params_st { + cudaGraphNode_t node; + cudaGraphNode_t *pDependentNodes; + size_t *pNumDependentNodes; +} cudaGraphNodeGetDependentNodes_v10000_params; + +typedef struct cudaGraphAddDependencies_v10000_params_st { + cudaGraph_t graph; + const cudaGraphNode_t *from; + const cudaGraphNode_t *to; + size_t numDependencies; +} cudaGraphAddDependencies_v10000_params; + +typedef struct cudaGraphRemoveDependencies_v10000_params_st { + cudaGraph_t graph; + const cudaGraphNode_t *from; + const cudaGraphNode_t *to; + size_t numDependencies; +} cudaGraphRemoveDependencies_v10000_params; + +typedef struct cudaGraphDestroyNode_v10000_params_st { + cudaGraphNode_t node; +} cudaGraphDestroyNode_v10000_params; + +typedef struct cudaGraphInstantiate_v12000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + unsigned long long flags; +} cudaGraphInstantiate_v12000_params; + +typedef struct cudaGraphInstantiateWithFlags_v11040_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + unsigned long long flags; +} cudaGraphInstantiateWithFlags_v11040_params; + +typedef struct cudaGraphInstantiateWithParams_ptsz_v12000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + cudaGraphInstantiateParams *instantiateParams; +} cudaGraphInstantiateWithParams_ptsz_v12000_params; + +typedef struct cudaGraphExecGetFlags_v12000_params_st { + cudaGraphExec_t graphExec; + unsigned long long *flags; +} cudaGraphExecGetFlags_v12000_params; + +typedef struct cudaGraphExecKernelNodeSetParams_v10010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaKernelNodeParams *pNodeParams; +} cudaGraphExecKernelNodeSetParams_v10010_params; + +typedef struct cudaGraphExecMemcpyNodeSetParams_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaMemcpy3DParms *pNodeParams; +} cudaGraphExecMemcpyNodeSetParams_v10020_params; + +typedef struct cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params; + +typedef struct cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + void *dst; + const void *symbol; + size_t 
count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params; + +typedef struct cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaGraphExecMemcpyNodeSetParams1D_v11010_params; + +typedef struct cudaGraphExecMemsetNodeSetParams_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaMemsetParams *pNodeParams; +} cudaGraphExecMemsetNodeSetParams_v10020_params; + +typedef struct cudaGraphExecHostNodeSetParams_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaHostNodeParams *pNodeParams; +} cudaGraphExecHostNodeSetParams_v10020_params; + +typedef struct cudaGraphExecChildGraphNodeSetParams_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + cudaGraph_t childGraph; +} cudaGraphExecChildGraphNodeSetParams_v11010_params; + +typedef struct cudaGraphExecEventRecordNodeSetEvent_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + cudaEvent_t event; +} cudaGraphExecEventRecordNodeSetEvent_v11010_params; + +typedef struct cudaGraphExecEventWaitNodeSetEvent_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + cudaEvent_t event; +} cudaGraphExecEventWaitNodeSetEvent_v11010_params; + +typedef struct cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreSignalNodeParams *nodeParams; +} cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params; + +typedef struct cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreWaitNodeParams *nodeParams; +} cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params; + +typedef struct cudaGraphNodeSetEnabled_v11060_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + unsigned int isEnabled; +} cudaGraphNodeSetEnabled_v11060_params; + +typedef struct cudaGraphNodeGetEnabled_v11060_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + unsigned int *isEnabled; +} cudaGraphNodeGetEnabled_v11060_params; + +typedef struct cudaGraphExecUpdate_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraph_t hGraph; + cudaGraphExecUpdateResultInfo *resultInfo; +} cudaGraphExecUpdate_v10020_params; + +typedef struct cudaGraphUpload_ptsz_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphUpload_ptsz_v10000_params; + +typedef struct cudaGraphLaunch_ptsz_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphLaunch_ptsz_v10000_params; + +typedef struct cudaGraphExecDestroy_v10000_params_st { + cudaGraphExec_t graphExec; +} cudaGraphExecDestroy_v10000_params; + +typedef struct cudaGraphDestroy_v10000_params_st { + cudaGraph_t graph; +} cudaGraphDestroy_v10000_params; + +typedef struct cudaGraphDebugDotPrint_v11030_params_st { + cudaGraph_t graph; + const char *path; + unsigned int flags; +} cudaGraphDebugDotPrint_v11030_params; + +typedef struct cudaUserObjectCreate_v11030_params_st { + cudaUserObject_t *object_out; + void *ptr; + cudaHostFn_t destroy; + unsigned int initialRefcount; + unsigned int flags; +} cudaUserObjectCreate_v11030_params; + +typedef struct cudaUserObjectRetain_v11030_params_st { + cudaUserObject_t object; + unsigned int 
count; +} cudaUserObjectRetain_v11030_params; + +typedef struct cudaUserObjectRelease_v11030_params_st { + cudaUserObject_t object; + unsigned int count; +} cudaUserObjectRelease_v11030_params; + +typedef struct cudaGraphRetainUserObject_v11030_params_st { + cudaGraph_t graph; + cudaUserObject_t object; + unsigned int count; + unsigned int flags; +} cudaGraphRetainUserObject_v11030_params; + +typedef struct cudaGraphReleaseUserObject_v11030_params_st { + cudaGraph_t graph; + cudaUserObject_t object; + unsigned int count; +} cudaGraphReleaseUserObject_v11030_params; + +typedef struct cudaGetDriverEntryPoint_ptsz_v11030_params_st { + const char *symbol; + void **funcPtr; + unsigned long long flags; + enum cudaDriverEntryPointQueryResult *driverStatus; +} cudaGetDriverEntryPoint_ptsz_v11030_params; + +typedef struct cudaGetFuncBySymbol_v11000_params_st { + cudaFunction_t *functionPtr; + const void *symbolPtr; +} cudaGetFuncBySymbol_v11000_params; + +typedef struct cudaGetKernel_v12000_params_st { + cudaKernel_t *kernelPtr; + const void *entryFuncAddr; +} cudaGetKernel_v12000_params; + +typedef struct cudaMemcpy_v3020_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpy_v3020_params; + +typedef struct cudaMemcpyToSymbol_v3020_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyToSymbol_v3020_params; + +typedef struct cudaMemcpyFromSymbol_v3020_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyFromSymbol_v3020_params; + +typedef struct cudaMemcpy2D_v3020_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2D_v3020_params; + +typedef struct cudaMemcpyToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyToArray_v3020_params; + +typedef struct cudaMemcpy2DToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DToArray_v3020_params; + +typedef struct cudaMemcpyFromArray_v3020_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyFromArray_v3020_params; + +typedef struct cudaMemcpy2DFromArray_v3020_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DFromArray_v3020_params; + +typedef struct cudaMemcpyArrayToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyArrayToArray_v3020_params; + +typedef struct cudaMemcpy2DArrayToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DArrayToArray_v3020_params; + +typedef struct cudaMemcpy3D_v3020_params_st { + const struct cudaMemcpy3DParms *p; +} cudaMemcpy3D_v3020_params; + +typedef struct cudaMemcpy3DPeer_v4000_params_st { + const struct cudaMemcpy3DPeerParms *p; +} 
cudaMemcpy3DPeer_v4000_params; + +typedef struct cudaMemset_v3020_params_st { + void *devPtr; + int value; + size_t count; +} cudaMemset_v3020_params; + +typedef struct cudaMemset2D_v3020_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; +} cudaMemset2D_v3020_params; + +typedef struct cudaMemset3D_v3020_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; +} cudaMemset3D_v3020_params; + +typedef struct cudaMemcpyAsync_v3020_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyAsync_v3020_params; + +typedef struct cudaMemcpyToSymbolAsync_v3020_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToSymbolAsync_v3020_params; + +typedef struct cudaMemcpyFromSymbolAsync_v3020_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromSymbolAsync_v3020_params; + +typedef struct cudaMemcpy2DAsync_v3020_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DAsync_v3020_params; + +typedef struct cudaMemcpyToArrayAsync_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToArrayAsync_v3020_params; + +typedef struct cudaMemcpy2DToArrayAsync_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DToArrayAsync_v3020_params; + +typedef struct cudaMemcpyFromArrayAsync_v3020_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromArrayAsync_v3020_params; + +typedef struct cudaMemcpy2DFromArrayAsync_v3020_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DFromArrayAsync_v3020_params; + +typedef struct cudaMemcpy3DAsync_v3020_params_st { + const struct cudaMemcpy3DParms *p; + cudaStream_t stream; +} cudaMemcpy3DAsync_v3020_params; + +typedef struct cudaMemcpy3DPeerAsync_v4000_params_st { + const struct cudaMemcpy3DPeerParms *p; + cudaStream_t stream; +} cudaMemcpy3DPeerAsync_v4000_params; + +typedef struct cudaMemsetAsync_v3020_params_st { + void *devPtr; + int value; + size_t count; + cudaStream_t stream; +} cudaMemsetAsync_v3020_params; + +typedef struct cudaMemset2DAsync_v3020_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; + cudaStream_t stream; +} cudaMemset2DAsync_v3020_params; + +typedef struct cudaMemset3DAsync_v3020_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; + cudaStream_t stream; +} cudaMemset3DAsync_v3020_params; + +typedef struct cudaStreamQuery_v3020_params_st { + cudaStream_t stream; +} cudaStreamQuery_v3020_params; + +typedef struct cudaStreamGetFlags_v5050_params_st { + cudaStream_t hStream; + unsigned int *flags; +} cudaStreamGetFlags_v5050_params; + +typedef struct cudaStreamGetId_v12000_params_st { + cudaStream_t hStream; + 
unsigned long long *streamId; +} cudaStreamGetId_v12000_params; + +typedef struct cudaStreamGetPriority_v5050_params_st { + cudaStream_t hStream; + int *priority; +} cudaStreamGetPriority_v5050_params; + +typedef struct cudaEventRecord_v3020_params_st { + cudaEvent_t event; + cudaStream_t stream; +} cudaEventRecord_v3020_params; + +typedef struct cudaEventRecordWithFlags_v11010_params_st { + cudaEvent_t event; + cudaStream_t stream; + unsigned int flags; +} cudaEventRecordWithFlags_v11010_params; + +typedef struct cudaStreamWaitEvent_v3020_params_st { + cudaStream_t stream; + cudaEvent_t event; + unsigned int flags; +} cudaStreamWaitEvent_v3020_params; + +typedef struct cudaStreamAddCallback_v5000_params_st { + cudaStream_t stream; + cudaStreamCallback_t callback; + void *userData; + unsigned int flags; +} cudaStreamAddCallback_v5000_params; + +typedef struct cudaStreamAttachMemAsync_v6000_params_st { + cudaStream_t stream; + void *devPtr; + size_t length; + unsigned int flags; +} cudaStreamAttachMemAsync_v6000_params; + +typedef struct cudaStreamSynchronize_v3020_params_st { + cudaStream_t stream; +} cudaStreamSynchronize_v3020_params; + +typedef struct cudaLaunchKernel_v7000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchKernel_v7000_params; + +typedef struct cudaLaunchKernelExC_v11060_params_st { + const cudaLaunchConfig_t *config; + const void *func; + void **args; +} cudaLaunchKernelExC_v11060_params; + +typedef struct cudaLaunchCooperativeKernel_v9000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchCooperativeKernel_v9000_params; + +typedef struct cudaLaunchHostFunc_v10000_params_st { + cudaStream_t stream; + cudaHostFn_t fn; + void *userData; +} cudaLaunchHostFunc_v10000_params; + +typedef struct cudaMemPrefetchAsync_v8000_params_st { + const void *devPtr; + size_t count; + int dstDevice; + cudaStream_t stream; +} cudaMemPrefetchAsync_v8000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_v10000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_ptsz_v10000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_v2_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_v2_v11020_params; + +typedef struct cudaWaitExternalSemaphoresAsync_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_v10000_params; + +typedef struct cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_ptsz_v10000_params; + +typedef struct 
cudaWaitExternalSemaphoresAsync_v2_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_v2_v11020_params; + +typedef struct cudaGraphInstantiateWithParams_v12000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + cudaGraphInstantiateParams *instantiateParams; +} cudaGraphInstantiateWithParams_v12000_params; + +typedef struct cudaGraphUpload_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphUpload_v10000_params; + +typedef struct cudaGraphLaunch_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphLaunch_v10000_params; + +typedef struct cudaStreamBeginCapture_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureMode mode; +} cudaStreamBeginCapture_v10000_params; + +typedef struct cudaStreamEndCapture_v10000_params_st { + cudaStream_t stream; + cudaGraph_t *pGraph; +} cudaStreamEndCapture_v10000_params; + +typedef struct cudaStreamIsCapturing_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *pCaptureStatus; +} cudaStreamIsCapturing_v10000_params; + +typedef struct cudaStreamGetCaptureInfo_v10010_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; +} cudaStreamGetCaptureInfo_v10010_params; + +typedef struct cudaStreamGetCaptureInfo_ptsz_v10010_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; +} cudaStreamGetCaptureInfo_ptsz_v10010_params; + +typedef struct cudaStreamGetCaptureInfo_v2_v11030_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; + cudaGraph_t *graph_out; + const cudaGraphNode_t **dependencies_out; + size_t *numDependencies_out; +} cudaStreamGetCaptureInfo_v2_v11030_params; + +typedef struct cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st { + cudaStream_t stream; + cudaGraphNode_t *dependencies; + size_t numDependencies; + unsigned int flags; +} cudaStreamUpdateCaptureDependencies_ptsz_v11030_params; + +typedef struct cudaStreamCopyAttributes_v11000_params_st { + cudaStream_t dstStream; + cudaStream_t srcStream; +} cudaStreamCopyAttributes_v11000_params; + +typedef struct cudaStreamGetAttribute_v11000_params_st { + cudaStream_t stream; + cudaStreamAttrID attr; + cudaStreamAttrValue *value; +} cudaStreamGetAttribute_v11000_params; + +typedef struct cudaStreamSetAttribute_v11000_params_st { + cudaStream_t stream; + cudaStreamAttrID attr; + const cudaStreamAttrValue *param; +} cudaStreamSetAttribute_v11000_params; + +typedef struct cudaMallocAsync_v11020_params_st { + void **devPtr; + size_t size; + cudaStream_t hStream; +} cudaMallocAsync_v11020_params; + +typedef struct cudaFreeAsync_v11020_params_st { + void *devPtr; + cudaStream_t hStream; +} cudaFreeAsync_v11020_params; + +typedef struct cudaMallocFromPoolAsync_v11020_params_st { + void **ptr; + size_t size; + cudaMemPool_t memPool; + cudaStream_t stream; +} cudaMallocFromPoolAsync_v11020_params; + +typedef struct cudaGetDriverEntryPoint_v11030_params_st { + const char *symbol; + void **funcPtr; + unsigned long long flags; + enum cudaDriverEntryPointQueryResult *driverStatus; +} cudaGetDriverEntryPoint_v11030_params; + +typedef struct cudaGetDeviceProperties_v3020_params_st { + struct cudaDeviceProp *prop; + int device; +} cudaGetDeviceProperties_v3020_params; 
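
[Editorial note: these generated *_params structs are what a CUPTI client sees through CUpti_CallbackData::functionParams — for each runtime-API callback ID, CUPTI passes a pointer to the struct whose name shares that ID's suffix. Below is a minimal sketch of that pattern using the standard CUPTI callback API (cuptiSubscribe, cuptiEnableCallback); the memcpyLogger/installMemcpyLogger names are illustrative and error checking is omitted.]

#include <stdio.h>
#include <cuda_runtime_api.h>
#include <cupti.h>

/* Logs every cudaMemcpyAsync the application issues, by casting
 * functionParams to the generated struct matching the callback ID. */
static void CUPTIAPI memcpyLogger(void *userdata, CUpti_CallbackDomain domain,
                                  CUpti_CallbackId cbid, const void *cbdata)
{
    const CUpti_CallbackData *cbInfo = (const CUpti_CallbackData *)cbdata;
    if (domain == CUPTI_CB_DOMAIN_RUNTIME_API &&
        cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020 &&
        cbInfo->callbackSite == CUPTI_API_ENTER) {
        const cudaMemcpyAsync_v3020_params *p =
            (const cudaMemcpyAsync_v3020_params *)cbInfo->functionParams;
        printf("cudaMemcpyAsync: %zu bytes, kind=%d, stream=%p\n",
               p->count, (int)p->kind, (void *)p->stream);
    }
}

/* Subscribe once per process, then enable just this one callback. */
static void installMemcpyLogger(void)
{
    CUpti_SubscriberHandle subscriber;
    cuptiSubscribe(&subscriber, memcpyLogger, NULL);
    cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API,
                        CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020);
}
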
+ +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..88e79d1957925c4bbacd381e9461d5072de88f24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h @@ -0,0 +1,38 @@ +// This file is generated. Any changes you make will be lost during the next clean build. + +// CUDA public interface, for type definitions and api function prototypes +#include "cuda_vdpau_interop.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaVDPAUGetDevice_v3020_params_st { + int *device; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cudaVDPAUGetDevice_v3020_params; + +typedef struct cudaVDPAUSetVDPAUDevice_v3020_params_st { + int device; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cudaVDPAUSetVDPAUDevice_v3020_params; + +typedef struct cudaGraphicsVDPAURegisterVideoSurface_v3020_params_st { + struct cudaGraphicsResource **resource; + VdpVideoSurface vdpSurface; + unsigned int flags; +} cudaGraphicsVDPAURegisterVideoSurface_v3020_params; + +typedef struct cudaGraphicsVDPAURegisterOutputSurface_v3020_params_st { + struct cudaGraphicsResource **resource; + VdpOutputSurface vdpSurface; + unsigned int flags; +} cudaGraphicsVDPAURegisterOutputSurface_v3020_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a0fc27a71bb3fc883db9fe7562eea3f28145430d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h @@ -0,0 +1,162 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
+ +// CUDA public interface, for type definitions and api function prototypes +#include "cudart_removed.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaStreamDestroy_v3020_params_st { + cudaStream_t stream; +} cudaStreamDestroy_v3020_params; + +typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params_st { + int *numBlocks; + const void *func; + size_t numDynamicSmemBytes; +} cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params; + +typedef struct cudaConfigureCall_v3020_params_st { + dim3 gridDim; + dim3 blockDim; + size_t sharedMem __dv; + cudaStream_t stream __dv; +} cudaConfigureCall_v3020_params; + +typedef struct cudaSetupArgument_v3020_params_st { + const void *arg; + size_t size; + size_t offset; +} cudaSetupArgument_v3020_params; + +typedef struct cudaLaunch_v3020_params_st { + const void *func; +} cudaLaunch_v3020_params; + +typedef struct cudaLaunch_ptsz_v7000_params_st { + const void *func; +} cudaLaunch_ptsz_v7000_params; + +typedef struct cudaStreamSetFlags_v10200_params_st { + cudaStream_t hStream; + unsigned int flags; +} cudaStreamSetFlags_v10200_params; + +typedef struct cudaStreamSetFlags_ptsz_v10200_params_st { + cudaStream_t hStream; + unsigned int flags; +} cudaStreamSetFlags_ptsz_v10200_params; + +typedef struct cudaProfilerInitialize_v4000_params_st { + const char *configFile; + const char *outputFile; + cudaOutputMode_t outputMode; +} cudaProfilerInitialize_v4000_params; + +typedef struct cudaThreadSetLimit_v3020_params_st { + enum cudaLimit limit; + size_t value; +} cudaThreadSetLimit_v3020_params; + +typedef struct cudaThreadGetLimit_v3020_params_st { + size_t *pValue; + enum cudaLimit limit; +} cudaThreadGetLimit_v3020_params; + +typedef struct cudaThreadGetCacheConfig_v3020_params_st { + enum cudaFuncCache *pCacheConfig; +} cudaThreadGetCacheConfig_v3020_params; + +typedef struct cudaThreadSetCacheConfig_v3020_params_st { + enum cudaFuncCache cacheConfig; +} cudaThreadSetCacheConfig_v3020_params; + +typedef struct cudaSetDoubleForDevice_v3020_params_st { + double *d; +} cudaSetDoubleForDevice_v3020_params; + +typedef struct cudaSetDoubleForHost_v3020_params_st { + double *d; +} cudaSetDoubleForHost_v3020_params; + +typedef struct cudaCreateTextureObject_v2_v11080_params_st { + cudaTextureObject_t *pTexObject; + const struct cudaResourceDesc *pResDesc; + const struct cudaTextureDesc *pTexDesc; + const struct cudaResourceViewDesc *pResViewDesc; +} cudaCreateTextureObject_v2_v11080_params; + +typedef struct cudaGetTextureObjectTextureDesc_v2_v11080_params_st { + struct cudaTextureDesc *pTexDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectTextureDesc_v2_v11080_params; + +typedef struct cudaBindTexture_v3020_params_st { + size_t *offset; + const struct textureReference *texref; + const void *devPtr; + const struct cudaChannelFormatDesc *desc; + size_t size __dv; +} cudaBindTexture_v3020_params; + +typedef struct cudaBindTexture2D_v3020_params_st { + size_t *offset; + const struct textureReference *texref; + const void *devPtr; + const struct cudaChannelFormatDesc *desc; + size_t width; + size_t height; + size_t pitch; +} cudaBindTexture2D_v3020_params; + +typedef struct cudaBindTextureToArray_v3020_params_st { + const struct textureReference *texref; + cudaArray_const_t array; + const struct 
cudaChannelFormatDesc *desc; +} cudaBindTextureToArray_v3020_params; + +typedef struct cudaBindTextureToMipmappedArray_v5000_params_st { + const struct textureReference *texref; + cudaMipmappedArray_const_t mipmappedArray; + const struct cudaChannelFormatDesc *desc; +} cudaBindTextureToMipmappedArray_v5000_params; + +typedef struct cudaUnbindTexture_v3020_params_st { + const struct textureReference *texref; +} cudaUnbindTexture_v3020_params; + +typedef struct cudaGetTextureAlignmentOffset_v3020_params_st { + size_t *offset; + const struct textureReference *texref; +} cudaGetTextureAlignmentOffset_v3020_params; + +typedef struct cudaGetTextureReference_v3020_params_st { + const struct textureReference **texref; + const void *symbol; +} cudaGetTextureReference_v3020_params; + +typedef struct cudaBindSurfaceToArray_v3020_params_st { + const struct surfaceReference *surfref; + cudaArray_const_t array; + const struct cudaChannelFormatDesc *desc; +} cudaBindSurfaceToArray_v3020_params; + +typedef struct cudaGetSurfaceReference_v3020_params_st { + const struct surfaceReference **surfref; + const void *symbol; +} cudaGetSurfaceReference_v3020_params; + +typedef struct cudaGraphInstantiate_v10000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + cudaGraphNode_t *pErrorNode; + char *pLogBuffer; + size_t bufferSize; +} cudaGraphInstantiate_v10000_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..ed8877e21f0651fe1564151090850694eb495cfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h @@ -0,0 +1,247 @@ +/* + * Copyright 2013-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +typedef struct nvtxMarkEx_params_st { + const nvtxEventAttributes_t* eventAttrib; +} nvtxMarkEx_params; + +typedef struct nvtxMarkA_params_st { + const char* message; +} nvtxMarkA_params; + +typedef struct nvtxMarkW_params_st { + const wchar_t* message; +} nvtxMarkW_params; + +typedef struct nvtxRangeStartEx_params_st { + const nvtxEventAttributes_t* eventAttrib; +} nvtxRangeStartEx_params; + +typedef struct nvtxRangeStartA_params_st { + const char* message; +} nvtxRangeStartA_params; + +typedef struct nvtxRangeStartW_params_st { + const wchar_t* message; +} nvtxRangeStartW_params; + +typedef struct nvtxRangeEnd_params_st { + nvtxRangeId_t id; +} nvtxRangeEnd_params; + +typedef struct nvtxRangePushEx_params_st { + const nvtxEventAttributes_t* eventAttrib; +} nvtxRangePushEx_params; + +typedef struct nvtxRangePushA_params_st { + const char* message; +} nvtxRangePushA_params; + +typedef struct nvtxRangePushW_params_st { + const wchar_t* message; +} nvtxRangePushW_params; + +typedef struct nvtxRangePop_params_st { + /* WAR: Windows compiler doesn't allow empty structs */ + /* This field shouldn't be used */ + void *dummy; +} nvtxRangePop_params; + +typedef struct nvtxNameCategoryA_params_st { + uint32_t category; + const char* name; +} nvtxNameCategoryA_params; + +typedef struct nvtxNameCategoryW_params_st { + uint32_t category; + const wchar_t* name; +} nvtxNameCategoryW_params; + +typedef struct nvtxNameOsThreadA_params_st { + uint32_t threadId; + const char* name; +} nvtxNameOsThreadA_params; + +typedef struct nvtxNameOsThreadW_params_st { + uint32_t threadId; + const wchar_t* name; +} nvtxNameOsThreadW_params; + +typedef struct nvtxNameCuDeviceA_params_st { + CUdevice device; + const char* name; +} nvtxNameCuDeviceA_params; + +typedef struct nvtxNameCuDeviceW_params_st { + CUdevice device; + const wchar_t* name; +} nvtxNameCuDeviceW_params; + +typedef struct nvtxNameCuContextA_params_st { + CUcontext context; + const char* name; +} 
nvtxNameCuContextA_params; + +typedef struct nvtxNameCuContextW_params_st { + CUcontext context; + const wchar_t* name; +} nvtxNameCuContextW_params; + +typedef struct nvtxNameCuStreamA_params_st { + CUstream stream; + const char* name; +} nvtxNameCuStreamA_params; + +typedef struct nvtxNameCuStreamW_params_st { + CUstream stream; + const wchar_t* name; +} nvtxNameCuStreamW_params; + +typedef struct nvtxNameCuEventA_params_st { + CUevent event; + const char* name; +} nvtxNameCuEventA_params; + +typedef struct nvtxNameCuEventW_params_st { + CUevent event; + const wchar_t* name; +} nvtxNameCuEventW_params; + +typedef struct nvtxNameCudaDeviceA_params_st { + int device; + const char* name; +} nvtxNameCudaDeviceA_params; + +typedef struct nvtxNameCudaDeviceW_params_st { + int device; + const wchar_t* name; +} nvtxNameCudaDeviceW_params; + +typedef struct nvtxNameCudaStreamA_params_st { + cudaStream_t stream; + const char* name; +} nvtxNameCudaStreamA_params; + +typedef struct nvtxNameCudaStreamW_params_st { + cudaStream_t stream; + const wchar_t* name; +} nvtxNameCudaStreamW_params; + +typedef struct nvtxNameCudaEventA_params_st { + cudaEvent_t event; + const char* name; +} nvtxNameCudaEventA_params; + +typedef struct nvtxNameCudaEventW_params_st { + cudaEvent_t event; + const wchar_t* name; +} nvtxNameCudaEventW_params; + +typedef struct nvtxDomainCreateA_params_st { + const char* name; +} nvtxDomainCreateA_params; + +typedef struct nvtxDomainDestroy_params_st { + nvtxDomainHandle_t domain; +} nvtxDomainDestroy_params; + +typedef struct nvtxDomainMarkEx_params_st { + nvtxDomainHandle_t domain; + nvtxMarkEx_params core; +} nvtxDomainMarkEx_params; + +typedef struct nvtxDomainRangeStartEx_params_st { + nvtxDomainHandle_t domain; + nvtxRangeStartEx_params core; +} nvtxDomainRangeStartEx_params; + +typedef struct nvtxDomainRangeEnd_params_st { + nvtxDomainHandle_t domain; + nvtxRangeEnd_params core; +} nvtxDomainRangeEnd_params; + +typedef struct nvtxDomainRangePushEx_params_st { + nvtxDomainHandle_t domain; + nvtxRangePushEx_params core; +} nvtxDomainRangePushEx_params; + +typedef struct nvtxDomainRangePop_params_st { + nvtxDomainHandle_t domain; +} nvtxDomainRangePop_params; + +typedef struct nvtxSyncUserCreate_params_st { + nvtxDomainHandle_t domain; + const nvtxSyncUserAttributes_t* attribs; +} nvtxSyncUserCreate_params; + +typedef struct nvtxSyncUserCommon_params_st { + nvtxSyncUser_t handle; +} nvtxSyncUserCommon_params; + +typedef struct nvtxDomainRegisterStringA_params_st { + nvtxDomainHandle_t domain; + const char* string; +} nvtxDomainRegisterStringA_params; + +typedef struct nvtxDomainRegisterStringW_params_st { + nvtxDomainHandle_t domain; + const wchar_t* string; +} nvtxDomainRegisterStringW_params; + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_common.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_common.h new file mode 100644 index 0000000000000000000000000000000000000000..fe16c9ebd55c9b8ee112e17889a7dc2b3ca7d68e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_common.h @@ -0,0 +1,273 @@ +#ifndef NVPERF_COMMON_H +#define NVPERF_COMMON_H + +/* + * Copyright 2014-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws.
+ * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +#include <stddef.h> +#include <stdint.h> + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_common.h + */ + +#ifndef NVPERF_NVPA_STATUS_DEFINED +#define NVPERF_NVPA_STATUS_DEFINED + + /// Error codes. + typedef enum NVPA_Status + { + /// Success + NVPA_STATUS_SUCCESS = 0, + /// Generic error. + NVPA_STATUS_ERROR = 1, + /// Internal error. Please file a bug! + NVPA_STATUS_INTERNAL_ERROR = 2, + /// NVPW_InitializeTarget() has not been called yet. + NVPA_STATUS_NOT_INITIALIZED = 3, + /// The NvPerf DLL/DSO could not be loaded during NVPW_Initialize*. + NVPA_STATUS_NOT_LOADED = 4, + /// The function was not found in this version of the NvPerf DLL/DSO. + NVPA_STATUS_FUNCTION_NOT_FOUND = 5, + /// The request was intentionally not supported. + NVPA_STATUS_NOT_SUPPORTED = 6, + /// The request was not implemented by this version. + NVPA_STATUS_NOT_IMPLEMENTED = 7, + /// Invalid argument. + NVPA_STATUS_INVALID_ARGUMENT = 8, + /// UNUSED + NVPA_STATUS_INVALID_METRIC_ID = 9, + /// No driver has been loaded via NVPW_*_LoadDriver(). + NVPA_STATUS_DRIVER_NOT_LOADED = 10, + /// Failed memory allocation. + NVPA_STATUS_OUT_OF_MEMORY = 11, + /// UNUSED + NVPA_STATUS_INVALID_THREAD_STATE = 12, + /// UNUSED + NVPA_STATUS_FAILED_CONTEXT_ALLOC = 13, + /// The specified GPU is not supported. + NVPA_STATUS_UNSUPPORTED_GPU = 14, + /// The installed NVIDIA driver is too old.
+ NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION = 15, + /// UNUSED + NVPA_STATUS_OBJECT_NOT_REGISTERED = 16, + /// Profiling permission not granted; see https://developer.nvidia.com/nvidia-development-tools-solutions- + /// ERR_NVGPUCTRPERM-permission-issue-performance-counters + NVPA_STATUS_INSUFFICIENT_PRIVILEGE = 17, + /// UNUSED + NVPA_STATUS_INVALID_CONTEXT_STATE = 18, + /// UNUSED + NVPA_STATUS_INVALID_OBJECT_STATE = 19, + /// The request could not be fulfilled because a system resource is already in use. + NVPA_STATUS_RESOURCE_UNAVAILABLE = 20, + /// UNUSED + NVPA_STATUS_DRIVER_LOADED_TOO_LATE = 21, + /// The provided buffer is not large enough. + NVPA_STATUS_INSUFFICIENT_SPACE = 22, + /// UNUSED + NVPA_STATUS_OBJECT_MISMATCH = 23, + /// Virtualized GPU (vGPU) is not supported. + NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 24, + /// Profiling permission was not granted or the device was disabled. + NVPA_STATUS_PROFILING_NOT_ALLOWED = 25, + NVPA_STATUS__COUNT + } NVPA_Status; + + +#endif // NVPERF_NVPA_STATUS_DEFINED + + +#ifndef NVPERF_NVPA_ACTIVITY_KIND_DEFINED +#define NVPERF_NVPA_ACTIVITY_KIND_DEFINED + + /// The configuration's activity-kind dictates which types of data may be collected. + typedef enum NVPA_ActivityKind + { + /// Invalid value. + NVPA_ACTIVITY_KIND_INVALID = 0, + /// A workload-centric activity for serialized and pipelined collection. + /// + /// Profiler is capable of collecting both serialized and pipelined metrics. The library introduces any + /// synchronization required to collect serialized metrics. + NVPA_ACTIVITY_KIND_PROFILER, + /// A realtime activity for sampling counters from the CPU or GPU. + NVPA_ACTIVITY_KIND_REALTIME_SAMPLED, + /// A realtime activity for profiling counters from the CPU or GPU without CPU/GPU synchronizations. + NVPA_ACTIVITY_KIND_REALTIME_PROFILER, + NVPA_ACTIVITY_KIND__COUNT + } NVPA_ActivityKind; + + +#endif // NVPERF_NVPA_ACTIVITY_KIND_DEFINED + + +#ifndef NVPERF_NVPA_BOOL_DEFINED +#define NVPERF_NVPA_BOOL_DEFINED + /// The type used for boolean values. + typedef uint8_t NVPA_Bool; +#endif // NVPERF_NVPA_BOOL_DEFINED + +#ifndef NVPA_STRUCT_SIZE +#define NVPA_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_)) +#endif // NVPA_STRUCT_SIZE + +#ifndef NVPW_FIELD_EXISTS +#define NVPW_FIELD_EXISTS(pParams_, name_) \ + ((pParams_)->structSize >= (size_t)((const uint8_t*)(&(pParams_)->name_) + sizeof(pParams_)->name_ - (const uint8_t*)(pParams_))) +#endif // NVPW_FIELD_EXISTS + + +#ifndef NVPERF_NVPA_GETPROCADDRESS_DEFINED +#define NVPERF_NVPA_GETPROCADDRESS_DEFINED + +typedef NVPA_Status (*NVPA_GenericFn)(void); + + + /// + /// Gets the address of an NvPerf API function. + /// + /// \return A function pointer to the function, or NULL if the function is not available. + /// + /// \param pFunctionName [in] Name of the function to retrieve. 
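+ /// Example usage (illustrative sketch, not part of the library documentation; the function-pointer typedef is local to the example, and NVPW_InitializeHost is declared in nvperf_host.h): + /// \code + /// typedef NVPA_Status (*NVPW_InitializeHost_Fn)(struct NVPW_InitializeHost_Params* pParams); + /// NVPW_InitializeHost_Fn pfnInitializeHost = + ///     (NVPW_InitializeHost_Fn)NVPA_GetProcAddress("NVPW_InitializeHost"); + /// if (pfnInitializeHost == NULL) { + ///     /* the entry point is not available in this version of the NvPerf DLL/DSO */ + /// } + /// \endcode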
+ NVPA_GenericFn NVPA_GetProcAddress(const char* pFunctionName); + +#endif + +#ifndef NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED +#define NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED + + + typedef struct NVPW_SetLibraryLoadPaths_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] number of paths in ppPaths + size_t numPaths; + /// [in] array of null-terminated paths + const char** ppPaths; + } NVPW_SetLibraryLoadPaths_Params; +#define NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPaths_Params, ppPaths) + + /// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget(). + /// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO. This function sets + /// ordered paths that will be searched with the LoadLibrary() or dlopen() call. + /// If load paths are set by this function, the default set of load paths + /// will not be attempted. + /// Each path must point at a directory (not a file name). + /// This function is not thread-safe. + /// Example Usage: + /// \code + /// const char* paths[] = { + /// "path1", "path2", etc + /// }; + /// NVPW_SetLibraryLoadPaths_Params params{NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE}; + /// params.numPaths = sizeof(paths)/sizeof(paths[0]); + /// params.ppPaths = paths; + /// NVPW_SetLibraryLoadPaths(&params); + /// NVPW_InitializeHost(); + /// params.numPaths = 0; + /// params.ppPaths = NULL; + /// NVPW_SetLibraryLoadPaths(&params); + /// \endcode + NVPA_Status NVPW_SetLibraryLoadPaths(NVPW_SetLibraryLoadPaths_Params* pParams); + + typedef struct NVPW_SetLibraryLoadPathsW_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] number of paths in ppwPaths + size_t numPaths; + /// [in] array of null-terminated paths + const wchar_t** ppwPaths; + } NVPW_SetLibraryLoadPathsW_Params; +#define NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPathsW_Params, ppwPaths) + + /// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget(). + /// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO. This function sets + /// ordered paths that will be searched with the LoadLibrary() or dlopen() call. + /// If load paths are set by this function, the default set of load paths + /// will not be attempted. + /// Each path must point at a directory (not a file name). + /// This function is not thread-safe.
+ /// Example Usage: + /// \code + /// const wchar_t* wpaths[] = { + /// L"path1", L"path2", etc + /// }; + /// NVPW_SetLibraryLoadPathsW_Params params{NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE}; + /// params.numPaths = sizeof(wpaths)/sizeof(wpaths[0]); + /// params.ppwPaths = wpaths; + /// NVPW_SetLibraryLoadPathsW(&params); + /// NVPW_InitializeHost(); + /// params.numPaths = 0; + /// params.ppwPaths = NULL; + /// NVPW_SetLibraryLoadPathsW(&params); + /// \endcode + NVPA_Status NVPW_SetLibraryLoadPathsW(NVPW_SetLibraryLoadPathsW_Params* pParams); + +#endif + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_COMMON_H diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h new file mode 100644 index 0000000000000000000000000000000000000000..dc8413768ddfd03fa30aa98e782121e8622a8820 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h @@ -0,0 +1,197 @@ +#ifndef NVPERF_CUDA_HOST_H +#define NVPERF_CUDA_HOST_H + +/* + * Copyright 2014-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. + * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice.
+ */ + +#include <stddef.h> +#include <stdint.h> +#include "nvperf_common.h" +#include "nvperf_host.h" + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_cuda_host.h + */ + + /// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead. + typedef struct NVPA_MetricsContext NVPA_MetricsContext; + + typedef struct NVPW_CUDA_MetricsContext_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const char* pChipName; + /// [out] + struct NVPA_MetricsContext* pMetricsContext; + } NVPW_CUDA_MetricsContext_Create_Params; +#define NVPW_CUDA_MetricsContext_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsContext_Create_Params, pMetricsContext) + + NVPA_Status NVPW_CUDA_MetricsContext_Create(NVPW_CUDA_MetricsContext_Create_Params* pParams); + + typedef struct NVPW_CUDA_RawMetricsConfig_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + NVPA_ActivityKind activityKind; + /// [in] + const char* pChipName; + /// [out] new NVPA_RawMetricsConfig object + struct NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_CUDA_RawMetricsConfig_Create_Params; +#define NVPW_CUDA_RawMetricsConfig_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_Params, pRawMetricsConfig) + + NVPA_Status NVPW_CUDA_RawMetricsConfig_Create(NVPW_CUDA_RawMetricsConfig_Create_Params* pParams); + + typedef struct NVPW_CUDA_RawMetricsConfig_Create_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + NVPA_ActivityKind activityKind; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [out] new NVPA_RawMetricsConfig object + struct NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_CUDA_RawMetricsConfig_Create_V2_Params; +#define NVPW_CUDA_RawMetricsConfig_Create_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_V2_Params, pRawMetricsConfig) + + /// Use either 'pChipName' or 'pCounterAvailabilityImage'. + NVPA_Status NVPW_CUDA_RawMetricsConfig_Create_V2(NVPW_CUDA_RawMetricsConfig_Create_V2_Params* pParams); + + typedef struct NVPW_CUDA_CounterDataBuilder_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [out] new NVPA_CounterDataBuilder object + struct NVPA_CounterDataBuilder* pCounterDataBuilder; + } NVPW_CUDA_CounterDataBuilder_Create_Params; +#define NVPW_CUDA_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_CounterDataBuilder_Create_Params, pCounterDataBuilder) + + /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
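+ /// Example usage (illustrative sketch; "GA100" is a placeholder chip name and error handling is omitted). Note the structSize-based versioning idiom via the _STRUCT_SIZE macro from nvperf_common.h: + /// \code + /// NVPW_CUDA_CounterDataBuilder_Create_Params createParams = { NVPW_CUDA_CounterDataBuilder_Create_Params_STRUCT_SIZE }; + /// createParams.pPriv = NULL; + /// createParams.pChipName = "GA100";              /* placeholder; see NVPW_GetSupportedChipNames() */ + /// createParams.pCounterAvailabilityImage = NULL; /* using pChipName instead */ + /// NVPA_Status status = NVPW_CUDA_CounterDataBuilder_Create(&createParams); + /// /* on NVPA_STATUS_SUCCESS, createParams.pCounterDataBuilder holds the new builder */ + /// \endcode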
+ NVPA_Status NVPW_CUDA_CounterDataBuilder_Create(NVPW_CUDA_CounterDataBuilder_Create_Params* pParams); + + typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator; + + typedef struct NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [out] + size_t scratchBufferSize; + } NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params; +#define NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params, scratchBufferSize) + + /// Use either 'pChipName' or 'pCounterAvailabilityImage'. + NVPA_Status NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params* pParams); + + typedef struct NVPW_CUDA_MetricsEvaluator_Initialize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + uint8_t* pScratchBuffer; + /// [in] the size of the 'pScratchBuffer' array, should be at least the size of the 'scratchBufferSize' returned + /// by 'NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize' + size_t scratchBufferSize; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] must be provided if 'pCounterDataImage' is not NULL + size_t counterDataImageSize; + /// [out] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + } NVPW_CUDA_MetricsEvaluator_Initialize_Params; +#define NVPW_CUDA_MetricsEvaluator_Initialize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_Initialize_Params, pMetricsEvaluator) + + /// Use one of 'pChipName', 'pCounterAvailabilityImage', or 'pCounterDataImage'. 'pChipName' or + /// 'pCounterAvailabilityImage' will create a metrics evaluator based on a virtual device while 'pCounterDataImage' + /// will create a metrics evaluator based on the actual device. + NVPA_Status NVPW_CUDA_MetricsEvaluator_Initialize(NVPW_CUDA_MetricsEvaluator_Initialize_Params* pParams); + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_CUDA_HOST_H diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h new file mode 100644 index 0000000000000000000000000000000000000000..e4bdd8cc95f066b9e6e2fd7af26d6f2f5ed11c2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h @@ -0,0 +1,1528 @@ +#ifndef NVPERF_HOST_H +#define NVPERF_HOST_H + +/* + * Copyright 2014-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. + * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. 
IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +#include <stddef.h> +#include <stdint.h> +#include "nvperf_common.h" + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_host.h + */ + + +// Guard against multiple definition of NvPerf host types +#ifndef NVPERF_HOST_API_DEFINED +#define NVPERF_HOST_API_DEFINED + + +/***************************************************************************//** + * @name Host Configuration + * @{ + */ + + typedef struct NVPW_InitializeHost_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + } NVPW_InitializeHost_Params; +#define NVPW_InitializeHost_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeHost_Params, pPriv) + + /// Load the host library. + NVPA_Status NVPW_InitializeHost(NVPW_InitializeHost_Params* pParams); + + typedef struct NVPW_CounterData_CalculateCounterDataImageCopySize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The CounterDataPrefix generated from e.g. nvperf2 initdata or + /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8).
+ const uint8_t* pCounterDataPrefix; + size_t counterDataPrefixSize; + /// max number of ranges that can be profiled + uint32_t maxNumRanges; + /// max number of RangeTree nodes; must be >= maxNumRanges + uint32_t maxNumRangeTreeNodes; + /// max string length of each RangeName, including the trailing NUL character + uint32_t maxRangeNameLength; + const uint8_t* pCounterDataSrc; + /// [out] required size of the copy buffer + size_t copyDataImageCounterSize; + } NVPW_CounterData_CalculateCounterDataImageCopySize_Params; +#define NVPW_CounterData_CalculateCounterDataImageCopySize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_CalculateCounterDataImageCopySize_Params, copyDataImageCounterSize) + + NVPA_Status NVPW_CounterData_CalculateCounterDataImageCopySize(NVPW_CounterData_CalculateCounterDataImageCopySize_Params* pParams); + + typedef struct NVPW_CounterData_InitializeCounterDataImageCopy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The CounterDataPrefix generated from e.g. nvperf2 initdata or + /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8). + const uint8_t* pCounterDataPrefix; + size_t counterDataPrefixSize; + /// max number of ranges that can be profiled + uint32_t maxNumRanges; + /// max number of RangeTree nodes; must be >= maxNumRanges + uint32_t maxNumRangeTreeNodes; + /// max string length of each RangeName, including the trailing NUL character + uint32_t maxRangeNameLength; + const uint8_t* pCounterDataSrc; + uint8_t* pCounterDataDst; + } NVPW_CounterData_InitializeCounterDataImageCopy_Params; +#define NVPW_CounterData_InitializeCounterDataImageCopy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_InitializeCounterDataImageCopy_Params, pCounterDataDst) + + NVPA_Status NVPW_CounterData_InitializeCounterDataImageCopy(NVPW_CounterData_InitializeCounterDataImageCopy_Params* pParams); + + typedef struct NVPA_CounterDataCombiner NVPA_CounterDataCombiner; + + typedef struct NVPW_CounterDataCombiner_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The destination counter data into which the source datas will be combined + uint8_t* pCounterDataDst; + /// [out] The created counter data combiner + NVPA_CounterDataCombiner* pCounterDataCombiner; + } NVPW_CounterDataCombiner_Create_Params; +#define NVPW_CounterDataCombiner_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Create_Params, pCounterDataCombiner) + + NVPA_Status NVPW_CounterDataCombiner_Create(NVPW_CounterDataCombiner_Create_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + } NVPW_CounterDataCombiner_Destroy_Params; +#define NVPW_CounterDataCombiner_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Destroy_Params, pCounterDataCombiner) + + NVPA_Status NVPW_CounterDataCombiner_Destroy(NVPW_CounterDataCombiner_Destroy_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_CreateRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t numDescriptions; + const char* const* ppDescriptions; + /// [out] + size_t rangeIndexDst; + } NVPW_CounterDataCombiner_CreateRange_Params; +#define NVPW_CounterDataCombiner_CreateRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CreateRange_Params, rangeIndexDst) + + NVPA_Status 
NVPW_CounterDataCombiner_CreateRange(NVPW_CounterDataCombiner_CreateRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_CopyIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + NVPA_CounterDataCombiner* pCounterDataCombiner; + /// [in] + size_t rangeIndexDst; + /// [in] + const uint8_t* pCounterDataSrc; + /// [in] + size_t rangeIndexSrc; + } NVPW_CounterDataCombiner_CopyIntoRange_Params; +#define NVPW_CounterDataCombiner_CopyIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CopyIntoRange_Params, rangeIndexSrc) + + /// In order to use this API, the source counter data and the destination counter data must have identical counters + NVPA_Status NVPW_CounterDataCombiner_CopyIntoRange(NVPW_CounterDataCombiner_CopyIntoRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_AccumulateIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t rangeIndexDst; + uint32_t dstMultiplier; + const uint8_t* pCounterDataSrc; + size_t rangeIndexSrc; + uint32_t srcMultiplier; + } NVPW_CounterDataCombiner_AccumulateIntoRange_Params; +#define NVPW_CounterDataCombiner_AccumulateIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_AccumulateIntoRange_Params, srcMultiplier) + + NVPA_Status NVPW_CounterDataCombiner_AccumulateIntoRange(NVPW_CounterDataCombiner_AccumulateIntoRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_SumIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t rangeIndexDst; + const uint8_t* pCounterDataSrc; + size_t rangeIndexSrc; + } NVPW_CounterDataCombiner_SumIntoRange_Params; +#define NVPW_CounterDataCombiner_SumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_SumIntoRange_Params, rangeIndexSrc) + + NVPA_Status NVPW_CounterDataCombiner_SumIntoRange(NVPW_CounterDataCombiner_SumIntoRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_WeightedSumIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t rangeIndexDst; + double dstMultiplier; + const uint8_t* pCounterDataSrc; + size_t rangeIndexSrc; + double srcMultiplier; + } NVPW_CounterDataCombiner_WeightedSumIntoRange_Params; +#define NVPW_CounterDataCombiner_WeightedSumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params, srcMultiplier) + + NVPA_Status NVPW_CounterDataCombiner_WeightedSumIntoRange(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params* pParams); + +/** + * @} + ******************************************************************************/ + +/***************************************************************************//** + * @name Metrics Configuration + * @{ + */ + + typedef struct NVPA_RawMetricsConfig NVPA_RawMetricsConfig; + + typedef struct NVPA_RawMetricRequest + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// in + const char* pMetricName; + /// in + NVPA_Bool isolated; + /// in; ignored by AddMetric but observed by CounterData initialization + NVPA_Bool keepInstances; + } NVPA_RawMetricRequest; +#define NVPA_RAW_METRIC_REQUEST_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPA_RawMetricRequest, keepInstances) + + typedef struct NVPW_GetSupportedChipNames_Params + { + /// [in] + size_t structSize; + /// 
[in] assign to NULL + void* pPriv; + /// [out] + const char* const* ppChipNames; + /// [out] + size_t numChipNames; + } NVPW_GetSupportedChipNames_Params; +#define NVPW_GetSupportedChipNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetSupportedChipNames_Params, numChipNames) + + NVPA_Status NVPW_GetSupportedChipNames(NVPW_GetSupportedChipNames_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_RawMetricsConfig_Destroy_Params; +#define NVPW_RawMetricsConfig_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_Destroy_Params, pRawMetricsConfig) + + NVPA_Status NVPW_RawMetricsConfig_Destroy(NVPW_RawMetricsConfig_Destroy_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_SetCounterAvailability_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [in] buffer with counter availability image + const uint8_t* pCounterAvailabilityImage; + } NVPW_RawMetricsConfig_SetCounterAvailability_Params; +#define NVPW_RawMetricsConfig_SetCounterAvailability_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_SetCounterAvailability_Params, pCounterAvailabilityImage) + + NVPA_Status NVPW_RawMetricsConfig_SetCounterAvailability(NVPW_RawMetricsConfig_SetCounterAvailability_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_BeginPassGroup_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + size_t maxPassCount; + } NVPW_RawMetricsConfig_BeginPassGroup_Params; +#define NVPW_RawMetricsConfig_BeginPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_BeginPassGroup_Params, maxPassCount) + + NVPA_Status NVPW_RawMetricsConfig_BeginPassGroup(NVPW_RawMetricsConfig_BeginPassGroup_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_EndPassGroup_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_RawMetricsConfig_EndPassGroup_Params; +#define NVPW_RawMetricsConfig_EndPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_EndPassGroup_Params, pRawMetricsConfig) + + NVPA_Status NVPW_RawMetricsConfig_EndPassGroup(NVPW_RawMetricsConfig_EndPassGroup_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetNumMetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [out] + size_t numMetrics; + } NVPW_RawMetricsConfig_GetNumMetrics_Params; +#define NVPW_RawMetricsConfig_GetNumMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumMetrics_Params, numMetrics) + + NVPA_Status NVPW_RawMetricsConfig_GetNumMetrics(NVPW_RawMetricsConfig_GetNumMetrics_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetMetricProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + size_t metricIndex; + /// [out] + const char* pMetricName; + /// [out] + NVPA_Bool supportsPipelined; + /// [out] + NVPA_Bool supportsIsolated; + } NVPW_RawMetricsConfig_GetMetricProperties_Params; +#define NVPW_RawMetricsConfig_GetMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_Params, supportsIsolated) + + NVPA_Status 
NVPW_RawMetricsConfig_GetMetricProperties(NVPW_RawMetricsConfig_GetMetricProperties_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetMetricProperties_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + size_t metricIndex; + /// [out] + const char* pMetricName; + } NVPW_RawMetricsConfig_GetMetricProperties_V2_Params; +#define NVPW_RawMetricsConfig_GetMetricProperties_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params, pMetricName) + + NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties_V2(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_AddMetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + const NVPA_RawMetricRequest* pRawMetricRequests; + size_t numMetricRequests; + } NVPW_RawMetricsConfig_AddMetrics_Params; +#define NVPW_RawMetricsConfig_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_AddMetrics_Params, numMetricRequests) + + NVPA_Status NVPW_RawMetricsConfig_AddMetrics(NVPW_RawMetricsConfig_AddMetrics_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_IsAddMetricsPossible_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + const NVPA_RawMetricRequest* pRawMetricRequests; + size_t numMetricRequests; + /// [out] + NVPA_Bool isPossible; + } NVPW_RawMetricsConfig_IsAddMetricsPossible_Params; +#define NVPW_RawMetricsConfig_IsAddMetricsPossible_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params, isPossible) + + NVPA_Status NVPW_RawMetricsConfig_IsAddMetricsPossible(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GenerateConfigImage_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [in] If true, all existing pass groups may be merged to reduce number of passes. + /// If merge was successful, distribution of counters in passes may be updated as a side-effect. The effects + /// will be persistent in pRawMetricsConfig. + NVPA_Bool mergeAllPassGroups; + } NVPW_RawMetricsConfig_GenerateConfigImage_Params; +#define NVPW_RawMetricsConfig_GenerateConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GenerateConfigImage_Params, mergeAllPassGroups) + + /// This API may fail if called inside a pass group with `mergeAllPassGroups` = true. 
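+ /// Example usage (illustrative sketch of the pass-group workflow declared above; 'pRawMetricsConfig', 'rawMetricRequests' and 'numRequests' are assumed caller state, and status checks are omitted): + /// \code + /// NVPW_RawMetricsConfig_BeginPassGroup_Params beginParams = { NVPW_RawMetricsConfig_BeginPassGroup_Params_STRUCT_SIZE }; + /// beginParams.pRawMetricsConfig = pRawMetricsConfig; + /// beginParams.maxPassCount = 1; + /// NVPW_RawMetricsConfig_BeginPassGroup(&beginParams); + /// NVPW_RawMetricsConfig_AddMetrics_Params addParams = { NVPW_RawMetricsConfig_AddMetrics_Params_STRUCT_SIZE }; + /// addParams.pRawMetricsConfig = pRawMetricsConfig; + /// addParams.pRawMetricRequests = rawMetricRequests; + /// addParams.numMetricRequests = numRequests; + /// NVPW_RawMetricsConfig_AddMetrics(&addParams); + /// NVPW_RawMetricsConfig_EndPassGroup_Params endParams = { NVPW_RawMetricsConfig_EndPassGroup_Params_STRUCT_SIZE }; + /// endParams.pRawMetricsConfig = pRawMetricsConfig; + /// NVPW_RawMetricsConfig_EndPassGroup(&endParams); + /// NVPW_RawMetricsConfig_GenerateConfigImage_Params genParams = { NVPW_RawMetricsConfig_GenerateConfigImage_Params_STRUCT_SIZE }; + /// genParams.pRawMetricsConfig = pRawMetricsConfig; + /// genParams.mergeAllPassGroups = 0; + /// NVPW_RawMetricsConfig_GenerateConfigImage(&genParams); + /// /* two-call idiom for NVPW_RawMetricsConfig_GetConfigImage below: query bytesCopied with pBuffer = NULL, allocate, then call again */ + /// \endcode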
+ NVPA_Status NVPW_RawMetricsConfig_GenerateConfigImage(NVPW_RawMetricsConfig_GenerateConfigImage_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetConfigImage_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [in] Number of bytes allocated for pBuffer + size_t bytesAllocated; + /// [out] [optional] Buffer receiving the config image + uint8_t* pBuffer; + /// [out] Count of bytes that would be copied into pBuffer + size_t bytesCopied; + } NVPW_RawMetricsConfig_GetConfigImage_Params; +#define NVPW_RawMetricsConfig_GetConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetConfigImage_Params, bytesCopied) + + NVPA_Status NVPW_RawMetricsConfig_GetConfigImage(NVPW_RawMetricsConfig_GetConfigImage_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetNumPasses_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [out] + size_t numPipelinedPasses; + /// [out] + size_t numIsolatedPasses; + } NVPW_RawMetricsConfig_GetNumPasses_Params; +#define NVPW_RawMetricsConfig_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_Params, numIsolatedPasses) + + /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels + NVPA_Status NVPW_RawMetricsConfig_GetNumPasses(NVPW_RawMetricsConfig_GetNumPasses_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetNumPasses_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [out] + size_t numPasses; + } NVPW_RawMetricsConfig_GetNumPasses_V2_Params; +#define NVPW_RawMetricsConfig_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_V2_Params, numPasses) + + /// Total num passes = numPasses * numNestingLevels + NVPA_Status NVPW_RawMetricsConfig_GetNumPasses_V2(NVPW_RawMetricsConfig_GetNumPasses_V2_Params* pParams); + + typedef struct NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8). + const uint8_t* pConfig; + /// [in] + size_t configSize; + /// [out] + size_t sampleSize; + } NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params; +#define NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params, sampleSize) + + /// Estimate per sample records size based on a virtual device + NVPA_Status NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params* pParams); + + typedef struct NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8). 
+ const uint8_t* pConfig; + /// [in] + size_t configSize; + /// [out] + size_t sampleSize; + } NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params; +#define NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params, sampleSize) + + /// Estimate per sample records size based on a virtual device + NVPA_Status NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params* pParams); + +/** + * @} + ******************************************************************************/ + +/***************************************************************************//** + * @name CounterData Creation + * @{ + */ + + typedef struct NVPA_CounterDataBuilder NVPA_CounterDataBuilder; + + typedef struct NVPW_CounterDataBuilder_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [out] + NVPA_CounterDataBuilder* pCounterDataBuilder; + const char* pChipName; + } NVPW_CounterDataBuilder_Create_Params; +#define NVPW_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Create_Params, pChipName) + + NVPA_Status NVPW_CounterDataBuilder_Create(NVPW_CounterDataBuilder_Create_Params* pParams); + + typedef struct NVPW_CounterDataBuilder_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataBuilder* pCounterDataBuilder; + } NVPW_CounterDataBuilder_Destroy_Params; +#define NVPW_CounterDataBuilder_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Destroy_Params, pCounterDataBuilder) + + NVPA_Status NVPW_CounterDataBuilder_Destroy(NVPW_CounterDataBuilder_Destroy_Params* pParams); + + typedef struct NVPW_CounterDataBuilder_AddMetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataBuilder* pCounterDataBuilder; + const NVPA_RawMetricRequest* pRawMetricRequests; + size_t numMetricRequests; + } NVPW_CounterDataBuilder_AddMetrics_Params; +#define NVPW_CounterDataBuilder_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_AddMetrics_Params, numMetricRequests) + + NVPA_Status NVPW_CounterDataBuilder_AddMetrics(NVPW_CounterDataBuilder_AddMetrics_Params* pParams); + + typedef struct NVPW_CounterDataBuilder_GetCounterDataPrefix_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataBuilder* pCounterDataBuilder; + /// [in] Number of bytes allocated for pBuffer + size_t bytesAllocated; + /// [out] [optional] Buffer receiving the counter data prefix + uint8_t* pBuffer; + /// [out] Count of bytes that would be copied to pBuffer + size_t bytesCopied; + } NVPW_CounterDataBuilder_GetCounterDataPrefix_Params; +#define NVPW_CounterDataBuilder_GetCounterDataPrefix_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params, bytesCopied) + + NVPA_Status NVPW_CounterDataBuilder_GetCounterDataPrefix(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params* pParams); + +/** + * @} + ******************************************************************************/ + +/***************************************************************************//** + * @name MetricsContext - metric configuration and evaluation + * @{ + */ + + /// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead. 
+ typedef struct NVPA_MetricsContext NVPA_MetricsContext; + + typedef enum NVPA_MetricDetailLevel + { + NVPA_METRIC_DETAIL_LEVEL_INVALID, + NVPA_METRIC_DETAIL_LEVEL_GPU, + NVPA_METRIC_DETAIL_LEVEL_ALL, + NVPA_METRIC_DETAIL_LEVEL_GPU_AND_LEAF_INSTANCES, + NVPA_METRIC_DETAIL_LEVEL__COUNT + } NVPA_MetricDetailLevel; + + typedef struct NVPW_MetricsContext_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_Destroy_Params; +#define NVPW_MetricsContext_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_Destroy_Params, pMetricsContext) + + NVPA_Status NVPW_MetricsContext_Destroy(NVPW_MetricsContext_Destroy_Params* pParams); + + typedef struct NVPW_MetricsContext_RunScript_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// in : if true, upon error, calls PyErr_Print() which causes exceptions to be logged to stderr + NVPA_Bool printErrors; + /// in : the script source code + const char* pSource; + /// in : the filename reported in stack traces; if NULL, uses an auto-generated name + const char* pFileName; + } NVPW_MetricsContext_RunScript_Params; +#define NVPW_MetricsContext_RunScript_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_RunScript_Params, pFileName) + + /// Runs code in the metrics module. Additional metrics can be added through this interface. + /// If printErrors is true, calls PyErr_Print() which causes exceptions to be logged to stderr. + /// Equivalent to: + /// exec(source, metrics.__dict__, metrics.__dict__) + NVPA_Status NVPW_MetricsContext_RunScript(NVPW_MetricsContext_RunScript_Params* pParams); + + typedef struct NVPW_MetricsContext_ExecScript_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// in : if true, treats pSource as a statement to be eval'd; otherwise, calls exec. + NVPA_Bool isStatement; + /// in : if true, upon error, calls PyErr_Print() which causes exceptions to be logged to stderr + NVPA_Bool printErrors; + /// in : the script source code + const char* pSource; + /// in : the filename reported in stack traces; if NULL, uses an auto-generated name + const char* pFileName; + /// out: if isStatement, points at a string form of the evaluation; if !isStatement, points at + /// str(locals()['result']) + const char* pResultStr; + } NVPW_MetricsContext_ExecScript_Begin_Params; +#define NVPW_MetricsContext_ExecScript_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_ExecScript_Begin_Params, pResultStr) + + /// Executes a script in the metrics module, but does not modify its contents (for ordinary queries). + /// Equivalent to one of: + /// eval(source, metrics.__dict__, {}) # isStatement true + /// exec(source, metrics.__dict__, {}) # isStatement false + NVPA_Status NVPW_MetricsContext_ExecScript_Begin(NVPW_MetricsContext_ExecScript_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_ExecScript_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_ExecScript_End_Params; +#define NVPW_MetricsContext_ExecScript_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_ExecScript_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_ExecScript_Begin. 
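+ /// Example usage of the Begin/End pairing (illustrative sketch; 'pMetricsContext' is assumed to come from NVPW_CUDA_MetricsContext_Create, and the script source is a trivial placeholder): + /// \code + /// NVPW_MetricsContext_ExecScript_Begin_Params execParams = { NVPW_MetricsContext_ExecScript_Begin_Params_STRUCT_SIZE }; + /// execParams.pMetricsContext = pMetricsContext; + /// execParams.isStatement = 1;   /* eval'd, so pResultStr holds the value */ + /// execParams.printErrors = 1; + /// execParams.pSource = "1 + 1"; /* placeholder script */ + /// execParams.pFileName = NULL; + /// if (NVPW_MetricsContext_ExecScript_Begin(&execParams) == NVPA_STATUS_SUCCESS) { + ///     /* execParams.pResultStr is valid until the matching _End call below */ + ///     NVPW_MetricsContext_ExecScript_End_Params endParams = { NVPW_MetricsContext_ExecScript_End_Params_STRUCT_SIZE }; + ///     endParams.pMetricsContext = pMetricsContext; + ///     NVPW_MetricsContext_ExecScript_End(&endParams); + /// } + /// \endcode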
+ NVPA_Status NVPW_MetricsContext_ExecScript_End(NVPW_MetricsContext_ExecScript_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetCounterNames_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// [out] + size_t numCounters; + /// [out] + const char* const* ppCounterNames; + } NVPW_MetricsContext_GetCounterNames_Begin_Params; +#define NVPW_MetricsContext_GetCounterNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetCounterNames_Begin_Params, ppCounterNames) + + /// Outputs (size, pointer) to an array of "const char* pCounterName". The lifetime of the array is tied to + /// MetricsContext. The names are sorted. + /// Impl: lazily creates list + NVPA_Status NVPW_MetricsContext_GetCounterNames_Begin(NVPW_MetricsContext_GetCounterNames_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetCounterNames_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetCounterNames_End_Params; +#define NVPW_MetricsContext_GetCounterNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetCounterNames_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetCounterNames_Begin. + NVPA_Status NVPW_MetricsContext_GetCounterNames_End(NVPW_MetricsContext_GetCounterNames_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetThroughputNames_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// [out] + size_t numThroughputs; + /// [out] + const char* const* ppThroughputNames; + } NVPW_MetricsContext_GetThroughputNames_Begin_Params; +#define NVPW_MetricsContext_GetThroughputNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputNames_Begin_Params, ppThroughputNames) + + /// Outputs (size, pointer) to an array of "const char* pThroughputName". The lifetime of the array is tied to + /// MetricsContext. The names are sorted. + /// Impl: lazily creates list + NVPA_Status NVPW_MetricsContext_GetThroughputNames_Begin(NVPW_MetricsContext_GetThroughputNames_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetThroughputNames_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetThroughputNames_End_Params; +#define NVPW_MetricsContext_GetThroughputNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputNames_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetThroughputNames_Begin. + NVPA_Status NVPW_MetricsContext_GetThroughputNames_End(NVPW_MetricsContext_GetThroughputNames_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetRatioNames_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// [out] + size_t numRatios; + /// [out] + const char* const* ppRatioNames; + } NVPW_MetricsContext_GetRatioNames_Begin_Params; +#define NVPW_MetricsContext_GetRatioNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetRatioNames_Begin_Params, ppRatioNames) + + /// Outputs (size, pointer) to an array of "const char* pRatioName". The lifetime of the array is tied to + /// MetricsContext. The names are sorted. 
+ /// Impl: lazily creates list + NVPA_Status NVPW_MetricsContext_GetRatioNames_Begin(NVPW_MetricsContext_GetRatioNames_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetRatioNames_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetRatioNames_End_Params; +#define NVPW_MetricsContext_GetRatioNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetRatioNames_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetCounterNames_Begin. + NVPA_Status NVPW_MetricsContext_GetRatioNames_End(NVPW_MetricsContext_GetRatioNames_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricNames_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// out: number of elements in array ppMetricNames + size_t numMetrics; + /// out: pointer to array of 'const char* pMetricName' + const char* const* ppMetricNames; + /// in : if true, doesn't enumerate \.peak_{burst, sustained} + NVPA_Bool hidePeakSubMetrics; + /// in : if true, doesn't enumerate \.per_{active,elapsed,region,frame}_cycle + NVPA_Bool hidePerCycleSubMetrics; + /// in : if true, doesn't enumerate \.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame} + NVPA_Bool hidePctOfPeakSubMetrics; + /// in : if false, enumerate \__throughput.pct_of_peak_sustained_elapsed even if hidePctOfPeakSubMetrics + /// is true + NVPA_Bool hidePctOfPeakSubMetricsOnThroughputs; + } NVPW_MetricsContext_GetMetricNames_Begin_Params; +#define NVPW_MetricsContext_GetMetricNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricNames_Begin_Params, hidePctOfPeakSubMetricsOnThroughputs) + + /// Outputs (size, pointer) to an array of "const char* pMetricName". The lifetime of the array is tied to + /// MetricsContext. The names are sorted. + /// Enumerates all metrics at all levels. Includes: + /// * counter.{sum,avg,min,max} + /// * throughput.{avg,min,max} + /// * \.peak_{burst, sustained} + /// * \.per_{active,elapsed,region,frame}_cycle + /// * \.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame} + /// * \.per.{other, other_pct} + NVPA_Status NVPW_MetricsContext_GetMetricNames_Begin(NVPW_MetricsContext_GetMetricNames_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricNames_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetMetricNames_End_Params; +#define NVPW_MetricsContext_GetMetricNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricNames_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricNames_Begin. 
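+ /// Example usage of the enumeration pattern (illustrative sketch; printf stands in for real consumption of the sorted names): + /// \code + /// NVPW_MetricsContext_GetMetricNames_Begin_Params nameParams = { NVPW_MetricsContext_GetMetricNames_Begin_Params_STRUCT_SIZE }; + /// nameParams.pMetricsContext = pMetricsContext; + /// if (NVPW_MetricsContext_GetMetricNames_Begin(&nameParams) == NVPA_STATUS_SUCCESS) { + ///     for (size_t i = 0; i < nameParams.numMetrics; ++i) { + ///         printf("%s\n", nameParams.ppMetricNames[i]); + ///     } + ///     NVPW_MetricsContext_GetMetricNames_End_Params endParams = { NVPW_MetricsContext_GetMetricNames_End_Params_STRUCT_SIZE }; + ///     endParams.pMetricsContext = pMetricsContext; + ///     NVPW_MetricsContext_GetMetricNames_End(&endParams); + /// } + /// \endcode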
+ NVPA_Status NVPW_MetricsContext_GetMetricNames_End(NVPW_MetricsContext_GetMetricNames_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + const char* pThroughputName; + const char* const* ppCounterNames; + const char* const* ppSubThroughputNames; + } NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params; +#define NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params, ppSubThroughputNames) + + /// After this function returns, the lifetimes of strings pointed to by {ppCounterNames, ppSubThroughputNames, + /// ppSubMetricNames} are guaranteed until NVPW_MetricsContext_GetThroughputBreakdown_End, or until pMetricsContext + /// is destroyed + NVPA_Status NVPW_MetricsContext_GetThroughputBreakdown_Begin(NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetThroughputBreakdown_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetThroughputBreakdown_End_Params; +#define NVPW_MetricsContext_GetThroughputBreakdown_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputBreakdown_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetThroughputBreakdown_Begin. + NVPA_Status NVPW_MetricsContext_GetThroughputBreakdown_End(NVPW_MetricsContext_GetThroughputBreakdown_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricProperties_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + const char* pMetricName; + /// out + const char* pDescription; + /// out + const char* pDimUnits; + /// out: a NULL-terminated array of pointers to RawMetric names that can be passed to + /// NVPW_RawMetricsConfig_AddMetrics() + const char** ppRawMetricDependencies; + /// out: metric.peak_burst.value.gpu + double gpuBurstRate; + /// out: metric.peak_sustained.value.gpu + double gpuSustainedRate; + /// out: a NULL-terminated array of pointers to RawMetric names that can be passed to + /// NVPW_RawMetricsConfig_AddMetrics(). + const char** ppOptionalRawMetricDependencies; + } NVPW_MetricsContext_GetMetricProperties_Begin_Params; +#define NVPW_MetricsContext_GetMetricProperties_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricProperties_Begin_Params, ppOptionalRawMetricDependencies) + + /// After this function returns, the lifetimes of strings pointed to by pMetricProperties or + /// ppOptionalRawMetricDependencies are guaranteed until NVPW_MetricsContext_GetMetricProperties_End, or until + /// pMetricsContext is destroyed. + NVPA_Status NVPW_MetricsContext_GetMetricProperties_Begin(NVPW_MetricsContext_GetMetricProperties_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricProperties_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetMetricProperties_End_Params; +#define NVPW_MetricsContext_GetMetricProperties_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricProperties_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricProperties_Begin. 
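+ /// Example usage (illustrative sketch; "sm__cycles_elapsed.avg" is a placeholder metric name, not one guaranteed by this header): + /// \code + /// NVPW_MetricsContext_GetMetricProperties_Begin_Params propParams = { NVPW_MetricsContext_GetMetricProperties_Begin_Params_STRUCT_SIZE }; + /// propParams.pMetricsContext = pMetricsContext; + /// propParams.pMetricName = "sm__cycles_elapsed.avg"; + /// if (NVPW_MetricsContext_GetMetricProperties_Begin(&propParams) == NVPA_STATUS_SUCCESS) { + ///     /* the NULL-terminated dependency list can be forwarded to NVPW_RawMetricsConfig_AddMetrics() */ + ///     for (const char** ppDep = propParams.ppRawMetricDependencies; *ppDep != NULL; ++ppDep) { + ///         printf("raw dependency: %s\n", *ppDep); + ///     } + ///     NVPW_MetricsContext_GetMetricProperties_End_Params endParams = { NVPW_MetricsContext_GetMetricProperties_End_Params_STRUCT_SIZE }; + ///     endParams.pMetricsContext = pMetricsContext; + ///     NVPW_MetricsContext_GetMetricProperties_End(&endParams); + /// } + /// \endcode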
+ NVPA_Status NVPW_MetricsContext_GetMetricProperties_End(NVPW_MetricsContext_GetMetricProperties_End_Params* pParams); + + typedef struct NVPW_MetricsContext_SetCounterData_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + const uint8_t* pCounterDataImage; + size_t rangeIndex; + NVPA_Bool isolated; + } NVPW_MetricsContext_SetCounterData_Params; +#define NVPW_MetricsContext_SetCounterData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_SetCounterData_Params, isolated) + + /// Sets data for subsequent evaluation calls. + /// Only one (CounterData, range, isolated) set of counters can be active at a time; subsequent calls will overwrite + /// previous calls' data. + NVPA_Status NVPW_MetricsContext_SetCounterData(NVPW_MetricsContext_SetCounterData_Params* pParams); + + typedef struct NVPW_MetricsContext_SetUserData_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// duration in ns of user defined frame + double frameDuration; + /// duration in ns of user defined region + double regionDuration; + } NVPW_MetricsContext_SetUserData_Params; +#define NVPW_MetricsContext_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_SetUserData_Params, regionDuration) + + /// Sets user data for subsequent evaluation calls. + NVPA_Status NVPW_MetricsContext_SetUserData(NVPW_MetricsContext_SetUserData_Params* pParams); + + typedef struct NVPW_MetricsContext_EvaluateToGpuValues_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + size_t numMetrics; + const char* const* ppMetricNames; + /// [out] + double* pMetricValues; + } NVPW_MetricsContext_EvaluateToGpuValues_Params; +#define NVPW_MetricsContext_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_EvaluateToGpuValues_Params, pMetricValues) + + /// Evaluate multiple metrics to retrieve their GPU values. + NVPA_Status NVPW_MetricsContext_EvaluateToGpuValues(NVPW_MetricsContext_EvaluateToGpuValues_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricSuffix_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// in: pointer to the metric name + const char* pMetricName; + /// out: number of elements in array ppSuffixes + size_t numSuffixes; + /// out: pointer to array of 'const char* pSuffixes' + const char* const* ppSuffixes; + /// in : if true, doesn't enumerate \.peak_{burst, sustained} + NVPA_Bool hidePeakSubMetrics; + /// in : if true, doesn't enumerate \.per_{active,elapsed,region,frame}_cycle + NVPA_Bool hidePerCycleSubMetrics; + /// in : if true, doesn't enumerate \.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame} + NVPA_Bool hidePctOfPeakSubMetrics; + /// in : if false, enumerate \__throughput.pct_of_peak_sustained_elapsed even if hidePctOfPeakSubMetrics + /// is true + NVPA_Bool hidePctOfPeakSubMetricsOnThroughputs; + } NVPW_MetricsContext_GetMetricSuffix_Begin_Params; +#define NVPW_MetricsContext_GetMetricSuffix_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricSuffix_Begin_Params, hidePctOfPeakSubMetricsOnThroughputs) + + /// Outputs (size, pointer) to an array of "const char* pSuffixes". The lifetime of the array is tied to + /// MetricsContext. + /// return all the suffixes the metric has. 
the possible suffixes include: + /// * counter.{sum,avg,min,max} + /// * throughput.{avg,min,max} + /// * \.peak_{burst, sustained} + /// * \.per_{active,elapsed,region,frame}_cycle + /// * \.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame} + /// * \.per.{other, other_pct} + NVPA_Status NVPW_MetricsContext_GetMetricSuffix_Begin(NVPW_MetricsContext_GetMetricSuffix_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricSuffix_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetMetricSuffix_End_Params; +#define NVPW_MetricsContext_GetMetricSuffix_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricSuffix_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricSuffix_Begin. + NVPA_Status NVPW_MetricsContext_GetMetricSuffix_End(NVPW_MetricsContext_GetMetricSuffix_End_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricBaseNames_Begin_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + /// out: number of elements in array pMetricsBaseNames + size_t numMetricBaseNames; + /// out: pointer to array of 'const char* pMetricsBaseName' + const char* const* ppMetricBaseNames; + } NVPW_MetricsContext_GetMetricBaseNames_Begin_Params; +#define NVPW_MetricsContext_GetMetricBaseNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricBaseNames_Begin_Params, ppMetricBaseNames) + + /// Outputs (size, pointer) to an array of "const char* ppMetricBaseNames". The lifetime of the array is tied to + /// MetricsContext. + /// return all the metric base names. + NVPA_Status NVPW_MetricsContext_GetMetricBaseNames_Begin(NVPW_MetricsContext_GetMetricBaseNames_Begin_Params* pParams); + + typedef struct NVPW_MetricsContext_GetMetricBaseNames_End_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_MetricsContext* pMetricsContext; + } NVPW_MetricsContext_GetMetricBaseNames_End_Params; +#define NVPW_MetricsContext_GetMetricBaseNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricBaseNames_End_Params, pMetricsContext) + + /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricBaseNames_Begin. 
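+/* Illustrative usage sketch (not part of the original NVIDIA header): enumerating
+ * every metric base name known to a MetricsContext, assuming pMetricsContext is
+ * valid. The returned array lives until the matching _End call:
+ *
+ *   NVPW_MetricsContext_GetMetricBaseNames_Begin_Params params = { NVPW_MetricsContext_GetMetricBaseNames_Begin_Params_STRUCT_SIZE };
+ *   params.pMetricsContext = pMetricsContext;
+ *   if (NVPW_MetricsContext_GetMetricBaseNames_Begin(&params) == NVPA_STATUS_SUCCESS)
+ *   {
+ *       for (size_t i = 0; i < params.numMetricBaseNames; ++i)
+ *       {
+ *           const char* pBaseName = params.ppMetricBaseNames[i];
+ *           // e.g. hand pBaseName to NVPW_MetricsContext_GetMetricSuffix_Begin
+ *       }
+ *       NVPW_MetricsContext_GetMetricBaseNames_End_Params endParams = { NVPW_MetricsContext_GetMetricBaseNames_End_Params_STRUCT_SIZE };
+ *       endParams.pMetricsContext = pMetricsContext;
+ *       NVPW_MetricsContext_GetMetricBaseNames_End(&endParams);
+ *   }
+ */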
+ NVPA_Status NVPW_MetricsContext_GetMetricBaseNames_End(NVPW_MetricsContext_GetMetricBaseNames_End_Params* pParams); + +/** + * @} + ******************************************************************************/ + +/***************************************************************************//** + * @name Metrics Evaluator + * @{ + */ + + typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator; + +#ifndef NVPW_DIM_UNIT_DEFINED +#define NVPW_DIM_UNIT_DEFINED + typedef enum NVPW_DimUnitName + { + NVPW_DIM_UNIT_INVALID = 3518299157, + NVPW_DIM_UNIT_UNITLESS = 2126137902, + NVPW_DIM_UNIT_ATTRIBUTES = 3776338729, + NVPW_DIM_UNIT_BYTES = 3797850191, + NVPW_DIM_UNIT_CTAS = 1960564139, + NVPW_DIM_UNIT_DRAM_CYCLES = 2650981327, + NVPW_DIM_UNIT_FBP_CYCLES = 1785238957, + NVPW_DIM_UNIT_FE_OPS = 2919159083, + NVPW_DIM_UNIT_GPC_CYCLES = 1222631184, + NVPW_DIM_UNIT_IDC_REQUESTS = 2012649669, + NVPW_DIM_UNIT_INSTRUCTIONS = 1418625543, + NVPW_DIM_UNIT_KILOBYTES = 1335980302, + NVPW_DIM_UNIT_L1DATA_BANK_ACCESSES = 1479493682, + NVPW_DIM_UNIT_L1DATA_BANK_CONFLICTS = 3433170787, + NVPW_DIM_UNIT_L1TEX_REQUESTS = 1306473767, + NVPW_DIM_UNIT_L1TEX_TAGS = 26573010, + NVPW_DIM_UNIT_L1TEX_WAVEFRONTS = 129373765, + NVPW_DIM_UNIT_L2_REQUESTS = 1143695106, + NVPW_DIM_UNIT_L2_SECTORS = 3424101564, + NVPW_DIM_UNIT_L2_TAGS = 3755612781, + NVPW_DIM_UNIT_NANOSECONDS = 3047500672, + NVPW_DIM_UNIT_NVLRX_CYCLES = 4059934930, + NVPW_DIM_UNIT_NVLTX_CYCLES = 1814350488, + NVPW_DIM_UNIT_PCIE_CYCLES = 1230450943, + NVPW_DIM_UNIT_PERCENT = 1284354694, + NVPW_DIM_UNIT_PIXELS = 4227616663, + NVPW_DIM_UNIT_PIXEL_SHADER_BARRIERS = 3705502518, + NVPW_DIM_UNIT_PRIMITIVES = 2373084002, + NVPW_DIM_UNIT_QUADS = 1539753497, + NVPW_DIM_UNIT_REGISTERS = 2837260947, + NVPW_DIM_UNIT_SAMPLES = 746046551, + NVPW_DIM_UNIT_SECONDS = 1164825258, + NVPW_DIM_UNIT_SYS_CYCLES = 3310821688, + NVPW_DIM_UNIT_TEXELS = 1293214069, + NVPW_DIM_UNIT_THREADS = 164261907, + NVPW_DIM_UNIT_VERTICES = 1873662209, + NVPW_DIM_UNIT_WARPS = 97951949, + NVPW_DIM_UNIT_WORKLOADS = 1728142656 + } NVPW_DimUnitName; +#endif //NVPW_DIM_UNIT_DEFINED + +#ifndef NVPW_HW_UNIT_DEFINED +#define NVPW_HW_UNIT_DEFINED + typedef enum NVPW_HwUnit + { + NVPW_HW_UNIT_INVALID = 3498035701, + NVPW_HW_UNIT_CROP = 2872137846, + NVPW_HW_UNIT_DRAM = 1662616918, + NVPW_HW_UNIT_DRAMC = 1401232876, + NVPW_HW_UNIT_FBP = 2947194306, + NVPW_HW_UNIT_FBPA = 690045803, + NVPW_HW_UNIT_FE = 2204924321, + NVPW_HW_UNIT_GPC = 1911735839, + NVPW_HW_UNIT_GPU = 1014363534, + NVPW_HW_UNIT_GR = 2933618517, + NVPW_HW_UNIT_IDC = 842765289, + NVPW_HW_UNIT_L1TEX = 893940957, + NVPW_HW_UNIT_LTS = 2333266697, + NVPW_HW_UNIT_NVLRX = 3091684901, + NVPW_HW_UNIT_NVLTX = 869679659, + NVPW_HW_UNIT_PCIE = 3433264174, + NVPW_HW_UNIT_PDA = 345193251, + NVPW_HW_UNIT_PES = 804128425, + NVPW_HW_UNIT_PROP = 3339255507, + NVPW_HW_UNIT_RASTER = 187932504, + NVPW_HW_UNIT_SM = 724224710, + NVPW_HW_UNIT_SMSP = 2837616917, + NVPW_HW_UNIT_SYS = 768990063, + NVPW_HW_UNIT_TPC = 1889024613, + NVPW_HW_UNIT_VAF = 753670509, + NVPW_HW_UNIT_VPC = 275561583, + NVPW_HW_UNIT_ZROP = 979500456 + } NVPW_HwUnit; +#endif //NVPW_HW_UNIT_DEFINED + + typedef enum NVPW_RollupOp + { + NVPW_ROLLUP_OP_AVG = 0, + NVPW_ROLLUP_OP_MAX, + NVPW_ROLLUP_OP_MIN, + NVPW_ROLLUP_OP_SUM, + NVPW_ROLLUP_OP__COUNT + } NVPW_RollupOp; + + typedef enum NVPW_MetricType + { + NVPW_METRIC_TYPE_COUNTER = 0, + NVPW_METRIC_TYPE_RATIO, + NVPW_METRIC_TYPE_THROUGHPUT, + NVPW_METRIC_TYPE__COUNT + } NVPW_MetricType; + + typedef enum NVPW_Submetric + { + NVPW_SUBMETRIC_NONE = 0, + 
NVPW_SUBMETRIC_PEAK_SUSTAINED = 1, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE = 2, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE_PER_SECOND = 3, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED = 4, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED_PER_SECOND = 5, + NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME = 6, + NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME_PER_SECOND = 7, + NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION = 8, + NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION_PER_SECOND = 9, + NVPW_SUBMETRIC_PER_CYCLE_ACTIVE = 10, + NVPW_SUBMETRIC_PER_CYCLE_ELAPSED = 11, + NVPW_SUBMETRIC_PER_CYCLE_IN_FRAME = 12, + NVPW_SUBMETRIC_PER_CYCLE_IN_REGION = 13, + NVPW_SUBMETRIC_PER_SECOND = 14, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ACTIVE = 15, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ELAPSED = 16, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_FRAME = 17, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_REGION = 18, + NVPW_SUBMETRIC_MAX_RATE = 19, + NVPW_SUBMETRIC_PCT = 20, + NVPW_SUBMETRIC_RATIO = 21, + NVPW_SUBMETRIC__COUNT + } NVPW_Submetric; + + typedef struct NVPW_MetricEvalRequest + { + /// the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t metricIndex; + /// one of 'NVPW_MetricType' + uint8_t metricType; + /// one of 'NVPW_RollupOp', required for Counter and Throughput, doesn't apply to Ratio + uint8_t rollupOp; + /// one of 'NVPW_Submetric', required for Ratio and Throughput, optional for Counter + uint16_t submetric; + } NVPW_MetricEvalRequest; +#define NVPW_MetricEvalRequest_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricEvalRequest, submetric) + + typedef struct NVPW_DimUnitFactor + { + /// one of 'NVPW_DimUnitName' + uint32_t dimUnit; + int8_t exponent; + } NVPW_DimUnitFactor; +#define NVPW_DimUnitFactor_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_DimUnitFactor, exponent) + + typedef struct NVPW_MetricsEvaluator_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + } NVPW_MetricsEvaluator_Destroy_Params; +#define NVPW_MetricsEvaluator_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_Destroy_Params, pMetricsEvaluator) + + NVPA_Status NVPW_MetricsEvaluator_Destroy(NVPW_MetricsEvaluator_Destroy_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricNames_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_MetricType' + uint8_t metricType; + /// [out] + const char* pMetricNames; + /// [out] + const size_t* pMetricNameBeginIndices; + /// [out] + size_t numMetrics; + } NVPW_MetricsEvaluator_GetMetricNames_Params; +#define NVPW_MetricsEvaluator_GetMetricNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricNames_Params, numMetrics) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricNames(NVPW_MetricsEvaluator_GetMetricNames_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] can be either a base metric or a metric + const char* pMetricName; + /// [out] one of 'NVPW_MetricType' + uint8_t metricType; + /// [out] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t metricIndex; + } NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params; +#define NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params, metricIndex) + + NVPA_Status 
NVPW_MetricsEvaluator_GetMetricTypeAndIndex(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const char* pMetricName; + /// [inout] 'pMetricEvalRequest' is in, '*pMetricEvalRequest' is out + struct NVPW_MetricEvalRequest* pMetricEvalRequest; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + } NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params; +#define NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params, metricEvalRequestStructSize) + + NVPA_Status NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_HwUnitToString_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_HwUnit' + uint32_t hwUnit; + /// [out] + const char* pHwUnitName; + } NVPW_MetricsEvaluator_HwUnitToString_Params; +#define NVPW_MetricsEvaluator_HwUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_HwUnitToString_Params, pHwUnitName) + + NVPA_Status NVPW_MetricsEvaluator_HwUnitToString(NVPW_MetricsEvaluator_HwUnitToString_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetCounterProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t counterIndex; + /// [out] + const char* pDescription; + /// [out] one of 'NVPW_HwUnit' + uint32_t hwUnit; + } NVPW_MetricsEvaluator_GetCounterProperties_Params; +#define NVPW_MetricsEvaluator_GetCounterProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetCounterProperties_Params, hwUnit) + + NVPA_Status NVPW_MetricsEvaluator_GetCounterProperties(NVPW_MetricsEvaluator_GetCounterProperties_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetRatioMetricProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t ratioMetricIndex; + /// [out] + const char* pDescription; + /// [out] + uint64_t hwUnit; + } NVPW_MetricsEvaluator_GetRatioMetricProperties_Params; +#define NVPW_MetricsEvaluator_GetRatioMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params, hwUnit) + + NVPA_Status NVPW_MetricsEvaluator_GetRatioMetricProperties(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t throughputMetricIndex; + /// [out] + const char* pDescription; + /// [out] + uint32_t hwUnit; + /// [out] number of constituent counters for the throughput metric + size_t numCounters; + /// [out] metric indices as 
in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numCounters' > 0, otherwise + /// returned as nullptr + const size_t* pCounterIndices; + /// [out] number of constituent sub-throughputs for the throughput metric + size_t numSubThroughputs; + /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numSubThroughputs' > 0, + /// otherwise returned as nullptr + const size_t* pSubThroughputIndices; + } NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params; +#define NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params, pSubThroughputIndices) + + NVPA_Status NVPW_MetricsEvaluator_GetThroughputMetricProperties(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_MetricType' + uint8_t metricType; + /// [out] an array of 'NVPW_Submetric' + const uint16_t* pSupportedSubmetrics; + /// [out] + size_t numSupportedSubmetrics; + } NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params; +#define NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params, numSupportedSubmetrics) + + NVPA_Status NVPW_MetricsEvaluator_GetSupportedSubmetrics(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricRawDependencies_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const struct NVPW_MetricEvalRequest* pMetricEvalRequests; + /// [in] + size_t numMetricEvalRequests; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + /// [in] set to sizeof('NVPW_MetricEvalRequest') + size_t metricEvalRequestStrideSize; + /// [inout] 'ppRawDependencies' is in, '*ppRawDependencies' is out + const char** ppRawDependencies; + /// [inout] if 'ppRawDependencies' is NULL, number of raw dependencies available will be returned; otherwise it + /// should be set to the number of elements allocated for 'ppRawDependencies', and on return, it will be + /// overwritten by number of elements copied to 'ppRawDependencies' + size_t numRawDependencies; + /// [inout] 'ppOptionalRawDependencies' is in, '*ppOptionalRawDependencies' is out + const char** ppOptionalRawDependencies; + /// [inout] if 'ppOptionalRawDependencies' is NULL, number of optional raw dependencies available will be + /// returned; otherwise it should be set to the number of elements allocated for 'ppOptionalRawDependencies', + /// and on return, it will be overwritten by number of elements copied to 'ppOptionalRawDependencies' + size_t numOptionalRawDependencies; + } NVPW_MetricsEvaluator_GetMetricRawDependencies_Params; +#define NVPW_MetricsEvaluator_GetMetricRawDependencies_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params, numOptionalRawDependencies) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricRawDependencies(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_DimUnitToString_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 
'NVPW_DimUnitName' + uint32_t dimUnit; + /// [out] + const char* pSingularName; + /// [out] + const char* pPluralName; + } NVPW_MetricsEvaluator_DimUnitToString_Params; +#define NVPW_MetricsEvaluator_DimUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_DimUnitToString_Params, pPluralName) + + NVPA_Status NVPW_MetricsEvaluator_DimUnitToString(NVPW_MetricsEvaluator_DimUnitToString_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricDimUnits_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const struct NVPW_MetricEvalRequest* pMetricEvalRequest; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + /// [inout] 'pDimUnits' is in, '*pDimUnits' is out + NVPW_DimUnitFactor* pDimUnits; + /// [inout] if 'pDimUnits' is NULL, number of dim-units available will be returned; otherwise it should be set + /// to the number of elements allocated for 'pDimUnits', and on return, it will be overwritten by number of + /// elements copied to 'pDimUnits' + size_t numDimUnits; + /// [in] set to 'NVPW_DimUnitFactor_STRUCT_SIZE' + size_t dimUnitFactorStructSize; + } NVPW_MetricsEvaluator_GetMetricDimUnits_Params; +#define NVPW_MetricsEvaluator_GetMetricDimUnits_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricDimUnits_Params, dimUnitFactorStructSize) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricDimUnits(NVPW_MetricsEvaluator_GetMetricDimUnits_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_SetUserData_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] duration in ns of user defined frame + double frameDuration; + /// [in] duration in ns of user defined region + double regionDuration; + /// [in] + NVPA_Bool isolated; + } NVPW_MetricsEvaluator_SetUserData_Params; +#define NVPW_MetricsEvaluator_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetUserData_Params, isolated) + + NVPA_Status NVPW_MetricsEvaluator_SetUserData(NVPW_MetricsEvaluator_SetUserData_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_EvaluateToGpuValues_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const struct NVPW_MetricEvalRequest* pMetricEvalRequests; + /// [in] + size_t numMetricEvalRequests; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + /// [in] set to sizeof('NVPW_MetricEvalRequest') + size_t metricEvalRequestStrideSize; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [in] + size_t rangeIndex; + /// [in] + NVPA_Bool isolated; + /// [inout] 'pMetricValues' is in, '*pMetricValues' is out + double* pMetricValues; + } NVPW_MetricsEvaluator_EvaluateToGpuValues_Params; +#define NVPW_MetricsEvaluator_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params, pMetricValues) + + NVPA_Status NVPW_MetricsEvaluator_EvaluateToGpuValues(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_SetDeviceAttributes_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] 
+ size_t counterDataImageSize; + } NVPW_MetricsEvaluator_SetDeviceAttributes_Params; +#define NVPW_MetricsEvaluator_SetDeviceAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetDeviceAttributes_Params, counterDataImageSize) + + NVPA_Status NVPW_MetricsEvaluator_SetDeviceAttributes(NVPW_MetricsEvaluator_SetDeviceAttributes_Params* pParams); + +/** + * @} + ******************************************************************************/ + + +#endif // NVPERF_HOST_API_DEFINED + + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_HOST_H diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_target.h b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_target.h new file mode 100644 index 0000000000000000000000000000000000000000..25d8b8ab64dad76a628b3a9eda668f08e3be7a32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_target.h @@ -0,0 +1,572 @@ +#ifndef NVPERF_TARGET_H +#define NVPERF_TARGET_H + +/* + * Copyright 2014-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. + * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. 
+ */ + +#include <stddef.h> +#include <stdint.h> +#include "nvperf_common.h" + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_target.h + */ + +#ifndef NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED +#define NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED + /// GPU architecture support level + typedef enum NVPW_GpuArchitectureSupportLevel + { + NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_UNSUPPORTED, + NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_SUPPORTED + } NVPW_GpuArchitectureSupportLevel; +#endif //NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_SLI_SUPPORT_LEVEL_DEFINED +#define NVPW_SLI_SUPPORT_LEVEL_DEFINED + /// SLI configuration support level + typedef enum NVPW_SliSupportLevel + { + NVPW_SLI_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_SLI_SUPPORT_LEVEL_UNSUPPORTED, + /// Only Non-SLI configurations are supported. + NVPW_SLI_SUPPORT_LEVEL_SUPPORTED_NON_SLI_CONFIGURATION + } NVPW_SliSupportLevel; +#endif //NVPW_SLI_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_VGPU_SUPPORT_LEVEL_DEFINED +#define NVPW_VGPU_SUPPORT_LEVEL_DEFINED + /// Virtualized GPU configuration support level + typedef enum NVPW_VGpuSupportLevel + { + NVPW_VGPU_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_VGPU_SUPPORT_LEVEL_UNSUPPORTED, + /// Supported but not allowed by system admin. + NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_DISALLOWED, + NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_ALLOWED, + NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_NON_VGPU_CONFIGURATION + } NVPW_VGpuSupportLevel; +#endif //NVPW_VGPU_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED +#define NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED + /// Confidential Compute mode support level + typedef enum NVPW_ConfidentialComputeSupportLevel + { + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_UNSUPPORTED, + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_SUPPORTED_NON_CONF_COMPUTE_CONFIGURATION + } NVPW_ConfidentialComputeSupportLevel; +#endif //NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_CMP_SUPPORT_LEVEL_DEFINED +#define NVPW_CMP_SUPPORT_LEVEL_DEFINED + /// CMP support level + typedef enum NVPW_CmpSupportLevel + { + NVPW_CMP_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_CMP_SUPPORT_LEVEL_UNSUPPORTED, + NVPW_CMP_SUPPORT_LEVEL_SUPPORTED_NON_CMP_CONFIGURATON + } NVPW_CmpSupportLevel; +#endif //NVPW_CMP_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_WSL_SUPPORT_LEVEL_DEFINED +#define NVPW_WSL_SUPPORT_LEVEL_DEFINED + /// WSL support level + typedef enum NVPW_WslSupportLevel + { + NVPW_WSL_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_WSL_SUPPORT_LEVEL_UNSUPPORTED_INSUFFICIENT_DRIVER_VERSION, + NVPW_WSL_SUPPORT_LEVEL_SUPPORTED, + NVPW_WSL_SUPPORT_LEVEL_SUPPORTED_NON_WSL_CONFIGURATION + } NVPW_WslSupportLevel; +#endif //NVPW_WSL_SUPPORT_LEVEL_DEFINED + + typedef struct NVPW_InitializeTarget_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + } NVPW_InitializeTarget_Params; +#define NVPW_InitializeTarget_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeTarget_Params, pPriv) + + /// Load the target library.
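+/* Illustrative usage sketch (not part of the original NVIDIA header): initialize the
+ * target library once, then enumerate devices. Error handling is elided for brevity.
+ *
+ *   NVPW_InitializeTarget_Params initParams = { NVPW_InitializeTarget_Params_STRUCT_SIZE };
+ *   NVPW_InitializeTarget(&initParams);
+ *
+ *   NVPW_GetDeviceCount_Params countParams = { NVPW_GetDeviceCount_Params_STRUCT_SIZE };
+ *   NVPW_GetDeviceCount(&countParams);
+ *   for (size_t i = 0; i < countParams.numDevices; ++i)
+ *   {
+ *       NVPW_Device_GetNames_Params nameParams = { NVPW_Device_GetNames_Params_STRUCT_SIZE };
+ *       nameParams.deviceIndex = i;
+ *       NVPW_Device_GetNames(&nameParams);  // pDeviceName / pChipName are filled on success
+ *   }
+ */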
+ NVPA_Status NVPW_InitializeTarget(NVPW_InitializeTarget_Params* pParams); + + typedef struct NVPW_GetDeviceCount_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t numDevices; + } NVPW_GetDeviceCount_Params; +#define NVPW_GetDeviceCount_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetDeviceCount_Params, numDevices) + + NVPA_Status NVPW_GetDeviceCount(NVPW_GetDeviceCount_Params* pParams); + + typedef struct NVPW_Device_GetNames_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t deviceIndex; + const char* pDeviceName; + const char* pChipName; + } NVPW_Device_GetNames_Params; +#define NVPW_Device_GetNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetNames_Params, pChipName) + + NVPA_Status NVPW_Device_GetNames(NVPW_Device_GetNames_Params* pParams); + + typedef struct NVPW_PciBusId + { + /// The PCI domain on which the device bus resides. + uint32_t domain; + /// The bus on which the device resides. + uint16_t bus; + /// device ID. + uint16_t device; + } NVPW_PciBusId; +#define NVPW_PciBusId_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PciBusId, device) + + typedef struct NVPW_Device_GetPciBusIds_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] caller-allocated array of NVPW_PciBusId, indexed by NVPW deviceIndex + NVPW_PciBusId* pBusIds; + /// [in] size of the pBusIDs array; use result from NVPW_GetDeviceCount + size_t numDevices; + } NVPW_Device_GetPciBusIds_Params; +#define NVPW_Device_GetPciBusIds_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetPciBusIds_Params, numDevices) + + NVPA_Status NVPW_Device_GetPciBusIds(NVPW_Device_GetPciBusIds_Params* pParams); + + +#define NVPW_DEVICE_MIG_GPU_INSTANCE_ID_INVALID 0xFFFFFFFFu +#define NVPW_DEVICE_MIG_GPU_INSTANCE_ID_FULLCHIP 0xFFFFFFFEu + + + typedef struct NVPW_Device_GetMigAttributes_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + size_t deviceIndex; + /// [out] + NVPA_Bool isMigPartition; + /// [out] + uint32_t gpuInstanceId; + /// [out] + uint32_t computeInstanceId; + } NVPW_Device_GetMigAttributes_Params; +#define NVPW_Device_GetMigAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetMigAttributes_Params, computeInstanceId) + + NVPA_Status NVPW_Device_GetMigAttributes(NVPW_Device_GetMigAttributes_Params* pParams); + + typedef struct NVPW_Adapter_GetDeviceIndex_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct IDXGIAdapter* pAdapter; + /// [in] + size_t sliIndex; + /// [out] + size_t deviceIndex; + } NVPW_Adapter_GetDeviceIndex_Params; +#define NVPW_Adapter_GetDeviceIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Adapter_GetDeviceIndex_Params, deviceIndex) + + NVPA_Status NVPW_Adapter_GetDeviceIndex(NVPW_Adapter_GetDeviceIndex_Params* pParams); + + typedef struct NVPW_CounterData_GetNumRanges_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const uint8_t* pCounterDataImage; + size_t numRanges; + } NVPW_CounterData_GetNumRanges_Params; +#define NVPW_CounterData_GetNumRanges_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetNumRanges_Params, numRanges) + + NVPA_Status NVPW_CounterData_GetNumRanges(NVPW_CounterData_GetNumRanges_Params* pParams); + + typedef struct NVPW_CounterData_GetChipName_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// 
[out] + const char* pChipName; + } NVPW_CounterData_GetChipName_Params; +#define NVPW_CounterData_GetChipName_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetChipName_Params, pChipName) + + NVPA_Status NVPW_CounterData_GetChipName(NVPW_CounterData_GetChipName_Params* pParams); + + typedef struct NVPW_Config_GetNumPasses_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pConfig; + /// [out] + size_t numPipelinedPasses; + /// [out] + size_t numIsolatedPasses; + } NVPW_Config_GetNumPasses_Params; +#define NVPW_Config_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetNumPasses_Params, numIsolatedPasses) + + /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels + NVPA_Status NVPW_Config_GetNumPasses(NVPW_Config_GetNumPasses_Params* pParams); + + typedef struct NVPW_Config_GetNumPasses_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pConfig; + /// [out] + size_t numPasses; + } NVPW_Config_GetNumPasses_V2_Params; +#define NVPW_Config_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetNumPasses_V2_Params, numPasses) + + /// Total num passes = numPasses * numNestingLevels + NVPA_Status NVPW_Config_GetNumPasses_V2(NVPW_Config_GetNumPasses_V2_Params* pParams); + +#define NVPW_API_SET_CUDA_PROFILER 0x18209d0775b2f89dULL + +#define NVPW_API_SET_D3D11_PROFILER 0xca55c6738445db2bULL + +#define NVPW_API_SET_D3D12_PROFILER 0xc0c2d46dd7c7ad78ULL + +#define NVPW_API_SET_EGL_PROFILER 0x3c3747dae1f9565cULL + +#define NVPW_API_SET_GPU_PERIODICSAMPLER 0x9f4c2571fc0b2e8aULL + +#define NVPW_API_SET_METRICSCONTEXT 0x7c8579f6f2144beaULL + +#define NVPW_API_SET_METRICSEVALUATOR 0x0368a8768d811af9ULL + +#define NVPW_API_SET_METRICS_GA100_COMP 0x16b7d8c20d8b4915ULL + +#define NVPW_API_SET_METRICS_GA100_GRFX 0xc94eaabec04a94faULL + +#define NVPW_API_SET_METRICS_GA10X_COMP 0xb5d6391c2e299ab5ULL + +#define NVPW_API_SET_METRICS_GA10X_GRFX 0x6ebc121178b5ce0bULL + +#define NVPW_API_SET_METRICS_GV100_COMP 0x863705cc57919f72ULL + +#define NVPW_API_SET_METRICS_GV100_GRFX 0x9900da75d164fecfULL + +#define NVPW_API_SET_METRICS_GV11B_COMP 0xd3f79a859235848fULL + +#define NVPW_API_SET_METRICS_GV11B_GRFX 0xeb8e26220106e227ULL + +#define NVPW_API_SET_METRICS_TU10X_COMP 0x70f40be0afd35da8ULL + +#define NVPW_API_SET_METRICS_TU10X_GRFX 0xdf219cb838db6968ULL + +#define NVPW_API_SET_METRICS_TU11X_COMP 0xeb0069d7d0956678ULL + +#define NVPW_API_SET_METRICS_TU11X_GRFX 0x0977d9342bd62743ULL + +#define NVPW_API_SET_OPENGL_PROFILER 0xe4cd9ea40f2ee777ULL + +#define NVPW_API_SET_VULKAN_PROFILER 0x8c56b6a03d779689ULL + +#define NVPW_SDK_VERSION 0x1e128b6f001423fcULL + + typedef struct NVPW_QueryVersionNumber_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + uint64_t apiSet; + /// [out] + uint32_t major; + /// [out] + uint32_t minor; + /// [out] + uint32_t patch; + /// [out] + uint32_t relMajor; + /// [out] + uint32_t relMinor; + /// [out] + uint32_t relPatch; + } NVPW_QueryVersionNumber_Params; +#define NVPW_QueryVersionNumber_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_QueryVersionNumber_Params, relPatch) + + /// Query version number of an API set + NVPA_Status NVPW_QueryVersionNumber(NVPW_QueryVersionNumber_Params* pParams); + + typedef enum NVPW_Device_ClockStatus + { + /// clock status is unknown + NVPW_DEVICE_CLOCK_STATUS_UNKNOWN, + /// clocks are locked to rated tdp values + 
NVPW_DEVICE_CLOCK_STATUS_LOCKED_TO_RATED_TDP, + /// clocks are not locked and can boost above rated tdp + NVPW_DEVICE_CLOCK_STATUS_BOOST_ENABLED, + /// clocks are not locked and will not go above rated tdp + NVPW_DEVICE_CLOCK_STATUS_BOOST_DISABLED, + NVPW_DEVICE_CLOCK_STATUS__COUNT + } NVPW_Device_ClockStatus; + + typedef struct NVPW_Device_GetClockStatus_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t deviceIndex; + /// [in] + NVPW_Device_ClockStatus clockStatus; + } NVPW_Device_GetClockStatus_Params; +#define NVPW_Device_GetClockStatus_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetClockStatus_Params, clockStatus) + + NVPA_Status NVPW_Device_GetClockStatus(NVPW_Device_GetClockStatus_Params* pParams); + + typedef enum NVPW_Device_ClockSetting + { + /// invalid op, specify valid clocks operation during profiling + NVPW_DEVICE_CLOCK_SETTING_INVALID, + /// default to driver/application config (normally unlocked and not boosted, but could be unlocked boosted, or + /// locked to rated TDP) + NVPW_DEVICE_CLOCK_SETTING_DEFAULT, + /// lock clocks at rated tdp base values + NVPW_DEVICE_CLOCK_SETTING_LOCK_TO_RATED_TDP, + NVPW_DEVICE_CLOCK_SETTING__COUNT + } NVPW_Device_ClockSetting; + + typedef struct NVPW_Device_SetClockSetting_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t deviceIndex; + /// [in] + NVPW_Device_ClockSetting clockSetting; + } NVPW_Device_SetClockSetting_Params; +#define NVPW_Device_SetClockSetting_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_SetClockSetting_Params, clockSetting) + + NVPA_Status NVPW_Device_SetClockSetting(NVPW_Device_SetClockSetting_Params* pParams); + + typedef struct NVPW_CounterData_GetRangeDescriptions_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const uint8_t* pCounterDataImage; + size_t rangeIndex; + /// [inout] Number of descriptions allocated in ppDescriptions + size_t numDescriptions; + const char** ppDescriptions; + } NVPW_CounterData_GetRangeDescriptions_Params; +#define NVPW_CounterData_GetRangeDescriptions_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetRangeDescriptions_Params, ppDescriptions) + + NVPA_Status NVPW_CounterData_GetRangeDescriptions(NVPW_CounterData_GetRangeDescriptions_Params* pParams); + + typedef struct NVPW_Profiler_CounterData_GetRangeDescriptions_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const uint8_t* pCounterDataImage; + size_t rangeIndex; + /// [inout] Number of descriptions allocated in ppDescriptions + size_t numDescriptions; + const char** ppDescriptions; + } NVPW_Profiler_CounterData_GetRangeDescriptions_Params; +#define NVPW_Profiler_CounterData_GetRangeDescriptions_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Profiler_CounterData_GetRangeDescriptions_Params, ppDescriptions) + + NVPA_Status NVPW_Profiler_CounterData_GetRangeDescriptions(NVPW_Profiler_CounterData_GetRangeDescriptions_Params* pParams); + +#ifndef NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED +#define NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED + typedef enum NVPW_PeriodicSampler_CounterData_AppendMode + { + NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_LINEAR = 0, + NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_CIRCULAR = 1, + NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE__COUNT + } NVPW_PeriodicSampler_CounterData_AppendMode; +#endif //NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED + + typedef struct 
NVPW_PeriodicSampler_CounterData_GetSampleTime_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t rangeIndex; + /// [out] + uint64_t timestampStart; + /// [out] + uint64_t timestampEnd; + } NVPW_PeriodicSampler_CounterData_GetSampleTime_Params; +#define NVPW_PeriodicSampler_CounterData_GetSampleTime_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetSampleTime_Params, timestampEnd) + + NVPA_Status NVPW_PeriodicSampler_CounterData_GetSampleTime(NVPW_PeriodicSampler_CounterData_GetSampleTime_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_TrimInPlace_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [out] + size_t counterDataImageTrimmedSize; + } NVPW_PeriodicSampler_CounterData_TrimInPlace_Params; +#define NVPW_PeriodicSampler_CounterData_TrimInPlace_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_TrimInPlace_Params, counterDataImageTrimmedSize) + + NVPA_Status NVPW_PeriodicSampler_CounterData_TrimInPlace(NVPW_PeriodicSampler_CounterData_TrimInPlace_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_GetInfo_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [out] total number of ranges in the counter data + size_t numTotalRanges; + /// [out] if in "linear" mode, this API returns the number of "populated" ranges; if it's in "circular" mode, + /// then it returns the last "populated" range index + 1, when there is no such range, it returns 0. + size_t numPopulatedRanges; + /// [out] if in "linear" mode, this API returns the number of "completed" ranges; if it's in "circular" mode, + /// then it returns the last "completed" range index + 1, when there is no such range, it returns 0. + size_t numCompletedRanges; + } NVPW_PeriodicSampler_CounterData_GetInfo_Params; +#define NVPW_PeriodicSampler_CounterData_GetInfo_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetInfo_Params, numCompletedRanges) + + /// In periodic sampler, a range in counter data stores exactly one sample's data. For better performance, periodic + /// sampler may operate in an out-of-order fashion when populating sample data, i.e. it may not fully populate all + /// counters of a sample/range before starting to populate the next sample/range. As a result, we have two concepts + /// here, "populated" & "completed": a range is considered "populated" even if only partial counters have been + /// written; on the other hand, a range is only considered "completed" if all the collecting counters have been + /// written. 
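+/* Illustrative usage sketch (not part of the original NVIDIA header): querying how
+ * much of a periodic-sampler counter data image is safe to decode, assuming
+ * pCounterDataImage / counterDataImageSize describe an image filled in "linear"
+ * append mode:
+ *
+ *   NVPW_PeriodicSampler_CounterData_GetInfo_Params infoParams = { NVPW_PeriodicSampler_CounterData_GetInfo_Params_STRUCT_SIZE };
+ *   infoParams.pCounterDataImage = pCounterDataImage;
+ *   infoParams.counterDataImageSize = counterDataImageSize;
+ *   if (NVPW_PeriodicSampler_CounterData_GetInfo(&infoParams) == NVPA_STATUS_SUCCESS)
+ *   {
+ *       // Only fully written samples are trustworthy: ranges beyond
+ *       // numCompletedRanges may still be partially populated (see above).
+ *       for (size_t r = 0; r < infoParams.numCompletedRanges; ++r)
+ *       {
+ *           // decode sample r, e.g. via NVPW_MetricsEvaluator_EvaluateToGpuValues
+ *       }
+ *   }
+ */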
+ NVPA_Status NVPW_PeriodicSampler_CounterData_GetInfo(NVPW_PeriodicSampler_CounterData_GetInfo_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [in] + size_t rangeIndex; + /// [out] + uint32_t triggerCount; + } NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params; +#define NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params, triggerCount) + + NVPA_Status NVPW_PeriodicSampler_CounterData_GetTriggerCount(NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params* pParams); + + + typedef struct NVPW_TimestampReport + { + uint32_t payload; + uint8_t reserved0004[4]; + uint64_t timestamp; + } NVPW_TimestampReport; + + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_TARGET_H diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..316059cd99f03c1a313cb6badfa6d46538839acc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so new file mode 100644 index 0000000000000000000000000000000000000000..451d7952ec367e0f7ff981401cc47a021b31f228 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so differ diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/__init__.py b/venv/lib/python3.10/site-packages/nvidia/curand/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/__init__.py b/venv/lib/python3.10/site-packages/nvidia/curand/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12bc7216d3691c380cabfe6e2c8a84b777945693 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h new file mode 100644 index 0000000000000000000000000000000000000000..7e194487a0e2ec02abcb1dd8634c42141a148d84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h @@ -0,0 +1,87 @@ + /* Copyright 2010-2014 NVIDIA 
Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(CURANDDISCRETE_H_) +#define CURANDDISCRETE_H_ + +struct curandDistributionShift_st { + curandDistribution_t probability; + curandDistribution_t host_probability; + unsigned int shift; + unsigned int length; + unsigned int host_gen; +}; + +struct curandHistogramM2_st { + curandHistogramM2V_t V; + curandHistogramM2V_t host_V; + curandHistogramM2K_t K; + curandHistogramM2K_t host_K; + unsigned int host_gen; +}; + + +struct curandDistributionM2Shift_st { + curandHistogramM2_t histogram; + curandHistogramM2_t host_histogram; + unsigned int shift; + unsigned int length; + unsigned int host_gen; +}; + +struct curandDiscreteDistribution_st { + curandDiscreteDistribution_t self_host_ptr; + curandDistributionM2Shift_t M2; + curandDistributionM2Shift_t host_M2; + double stddev; + double mean; + curandMethod_t method; + unsigned int host_gen; +}; + +#endif // !defined(CURANDDISCRETE_H_) diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..c9d27c5f271cae931091c85ecfe37b5e7d427bf3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h @@ -0,0 +1,1677 @@ + + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. 
Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + +#if !defined(CURAND_KERNEL_H_) +#define CURAND_KERNEL_H_ + +/** + * \defgroup DEVICE Device API + * + * @{ + */ + +#if !defined(QUALIFIERS) +#define QUALIFIERS static __forceinline__ __device__ +#endif + +/* To prevent unused parameter warnings */ +#if !defined(GCC_UNUSED_PARAMETER) +#if defined(__GNUC__) +#define GCC_UNUSED_PARAMETER __attribute__((unused)) +#else +#define GCC_UNUSED_PARAMETER +#endif /* defined(__GNUC__) */ +#endif /* !defined(GCC_UNUSED_PARAMETER) */ + +#include <nv/target> + +#ifdef __CUDACC_RTC__ +#define CURAND_DETAIL_USE_CUDA_STL +#endif + +#if __cplusplus >= 201103L +# ifdef CURAND_DETAIL_USE_CUDA_STL +# define CURAND_STD cuda::std +# include <cuda/std/type_traits> +# else +# define CURAND_STD std +# include <type_traits> +# endif // CURAND_DETAIL_USE_CUDA_STL +#else +// To support C++03 compilation +# define CURAND_STD curand_detail +namespace curand_detail { + template <bool, typename T = void> + struct enable_if {}; + + template <typename T> + struct enable_if<true, T> { typedef T type; }; + + template <typename T, typename U> + struct is_same { static const bool value = false; }; + + template <typename T> + struct is_same<T, T> { static const bool value = true; }; +} // namespace curand_detail +#endif // __cplusplus >= 201103L + +#ifndef __CUDACC_RTC__ +#include <math.h> +#endif // __CUDACC_RTC__ + +#include "curand.h" +#include "curand_discrete.h" +#include "curand_precalc.h" +#include "curand_mrg32k3a.h" +#include "curand_mtgp32_kernel.h" +#include "curand_philox4x32_x.h" +#include "curand_globals.h" + +/* Test RNG */ +/* This generator uses the formula: + x_n = x_(n-1) + 1 mod 2^32 + x_0 = (unsigned int)seed * 3 + Subsequences are spaced 31337 steps apart. +*/ +struct curandStateTest { + unsigned int v; +}; + +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateTest curandStateTest_t; +/** \endcond */ + +/* XORSHIFT FAMILY RNGs */ +/* These generators are a family proposed by Marsaglia. They keep state + in 32 bit chunks, then use repeated shift and xor operations to scramble + the bits. The following generators are a combination of a simple Weyl + generator with an N variable XORSHIFT generator. +*/ + +/* XORSHIFT RNG */ +/* This generator uses the xorwow formula of +www.jstatsoft.org/v08/i14/paper page 5 +Has period 2^192 - 2^32. +*/ +/** + * CURAND XORWOW state + */ +struct curandStateXORWOW; + +/* + * Implementation details not in reference documentation */ +struct curandStateXORWOW { + unsigned int d, v[5]; + int boxmuller_flag; + int boxmuller_flag_double; + float boxmuller_extra; + double boxmuller_extra_double; +}; + +/* + * CURAND XORWOW state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateXORWOW curandStateXORWOW_t; + +#define EXTRA_FLAG_NORMAL 0x00000001 +#define EXTRA_FLAG_LOG_NORMAL 0x00000002 +/** \endcond */ + +/* Combined Multiple Recursive Generators */ +/* These generators are a family proposed by L'Ecuyer. They keep state + in sets of doubles, then use repeated modular arithmetic multiply operations + to scramble the bits in each set, and combine the result. +*/ + +/* MRG32k3a RNG */ +/* This generator uses the MRG32k3A formula of +http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/c++/streams4.pdf +Has period 2^191. +*/ + +/* moduli for the recursions */ +/** \cond UNHIDE_DEFINES */ +#define MRG32K3A_MOD1 4294967087.
+#define MRG32K3A_MOD2 4294944443. + +/* Constants used in generation */ + +#define MRG32K3A_A12 1403580. +#define MRG32K3A_A13N 810728. +#define MRG32K3A_A21 527612. +#define MRG32K3A_A23N 1370589. +#define MRG32K3A_NORM (2.3283065498378288e-10) +// +// #define MRG32K3A_BITS_NORM ((double)((POW32_DOUBLE-1.0)/MOD1)) +// above constant, used verbatim, rounds differently on some host systems. +#define MRG32K3A_BITS_NORM 1.000000048662 + +/** \endcond */ + + + + +/** + * CURAND MRG32K3A state + */ +struct curandStateMRG32k3a; + +/* Implementation details not in reference documentation */ +struct curandStateMRG32k3a { + unsigned int s1[3]; + unsigned int s2[3]; + int boxmuller_flag; + int boxmuller_flag_double; + float boxmuller_extra; + double boxmuller_extra_double; +}; + +/* + * CURAND MRG32K3A state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateMRG32k3a curandStateMRG32k3a_t; +/** \endcond */ + +/* SOBOL QRNG */ +/** + * CURAND Sobol32 state + */ +struct curandStateSobol32; + +/* Implementation details not in reference documentation */ +struct curandStateSobol32 { + unsigned int i, x, c; + unsigned int direction_vectors[32]; +}; + +/* + * CURAND Sobol32 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateSobol32 curandStateSobol32_t; +/** \endcond */ + +/** + * CURAND Scrambled Sobol32 state + */ +struct curandStateScrambledSobol32; + +/* Implementation details not in reference documentation */ +struct curandStateScrambledSobol32 { + unsigned int i, x, c; + unsigned int direction_vectors[32]; +}; + +/* + * CURAND Scrambled Sobol32 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateScrambledSobol32 curandStateScrambledSobol32_t; +/** \endcond */ + +/** + * CURAND Sobol64 state + */ +struct curandStateSobol64; + +/* Implementation details not in reference documentation */ +struct curandStateSobol64 { + unsigned long long i, x, c; + unsigned long long direction_vectors[64]; +}; + +/* + * CURAND Sobol64 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateSobol64 curandStateSobol64_t; +/** \endcond */ + +/** + * CURAND Scrambled Sobol64 state + */ +struct curandStateScrambledSobol64; + +/* Implementation details not in reference documentation */ +struct curandStateScrambledSobol64 { + unsigned long long i, x, c; + unsigned long long direction_vectors[64]; +}; + +/* + * CURAND Scrambled Sobol64 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateScrambledSobol64 curandStateScrambledSobol64_t; +/** \endcond */ + +/* + * Default RNG + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateXORWOW curandState_t; +typedef struct curandStateXORWOW curandState; +/** \endcond */ + +/****************************************************************************/ +/* Utility functions needed by RNGs */ +/****************************************************************************/ +/** \cond UNHIDE_UTILITIES */ +/* + multiply vector by matrix, store in result + matrix is n x n, measured in 32 bit units + matrix is stored in row major order + vector and result cannot be same pointer +*/ +template <int N> +QUALIFIERS void __curand_matvec_inplace(unsigned int *vector, unsigned int *matrix) +{ + unsigned int result[N] = { 0 }; + for(int i = 0; i < N; i++) { + #ifdef __CUDA_ARCH__ + #pragma unroll 16 + #endif + for(int j = 0; j < 32; j++) { + if(vector[i] & (1 << j)) { + for(int k = 0; k < N; k++) { + result[k] ^= matrix[N * (i * 32 + j) + k]; + } + } + } + } + for(int i = 0; i < N; i++) { + vector[i] = result[i]; + } +} + +QUALIFIERS
void __curand_matvec(unsigned int *vector, unsigned int *matrix, + unsigned int *result, int n) +{ + for(int i = 0; i < n; i++) { + result[i] = 0; + } + for(int i = 0; i < n; i++) { + for(int j = 0; j < 32; j++) { + if(vector[i] & (1 << j)) { + for(int k = 0; k < n; k++) { + result[k] ^= matrix[n * (i * 32 + j) + k]; + } + } + } + } +} + +/* generate identity matrix */ +QUALIFIERS void __curand_matidentity(unsigned int *matrix, int n) +{ + int r; + for(int i = 0; i < n * 32; i++) { + for(int j = 0; j < n; j++) { + r = i & 31; + if(i / 32 == j) { + matrix[i * n + j] = (1 << r); + } else { + matrix[i * n + j] = 0; + } + } + } +} + +/* multiply matrixA by matrixB, store back in matrixA + matrixA and matrixB must not be same matrix */ +QUALIFIERS void __curand_matmat(unsigned int *matrixA, unsigned int *matrixB, int n) +{ + unsigned int result[MAX_XOR_N]; + for(int i = 0; i < n * 32; i++) { + __curand_matvec(matrixA + i * n, matrixB, result, n); + for(int j = 0; j < n; j++) { + matrixA[i * n + j] = result[j]; + } + } +} + +/* copy vectorA to vector */ +QUALIFIERS void __curand_veccopy(unsigned int *vector, unsigned int *vectorA, int n) +{ + for(int i = 0; i < n; i++) { + vector[i] = vectorA[i]; + } +} + +/* copy matrixA to matrix */ +QUALIFIERS void __curand_matcopy(unsigned int *matrix, unsigned int *matrixA, int n) +{ + for(int i = 0; i < n * n * 32; i++) { + matrix[i] = matrixA[i]; + } +} + +/* compute matrixA to power p, store result in matrix */ +QUALIFIERS void __curand_matpow(unsigned int *matrix, unsigned int *matrixA, + unsigned long long p, int n) +{ + unsigned int matrixR[MAX_XOR_N * MAX_XOR_N * 32]; + unsigned int matrixS[MAX_XOR_N * MAX_XOR_N * 32]; + __curand_matidentity(matrix, n); + __curand_matcopy(matrixR, matrixA, n); + while(p) { + if(p & 1) { + __curand_matmat(matrix, matrixR, n); + } + __curand_matcopy(matrixS, matrixR, n); + __curand_matmat(matrixR, matrixS, n); + p >>= 1; + } +} + +/****************************************************************************/ +/* Utility functions needed by MRG32k3a RNG */ +/* Matrix operations modulo some integer less than 2**32, done in */ +/* double precision floating point, with care not to overflow 53 bits */ +/****************************************************************************/ + +/* return i mod m. */ +/* assumes i and m are integers represented accurately in doubles */ + +QUALIFIERS double curand_MRGmod(double i, double m) +{ + double quo; + double rem; + quo = floor(i/m); + rem = i - (quo*m); + if (rem < 0.0) rem += m; + return rem; +} + +/* Multiplication modulo m. 
Inputs i and j less than 2**32 */ +/* Ensure intermediate results do not exceed 2**53 */ + +QUALIFIERS double curand_MRGmodMul(double i, double j, double m) +{ + double tempHi; + double tempLo; + + tempHi = floor(i/131072.0); + tempLo = i - (tempHi*131072.0); + tempLo = curand_MRGmod( curand_MRGmod( (tempHi * j), m) * 131072.0 + curand_MRGmod(tempLo * j, m),m); + + if (tempLo < 0.0) tempLo += m; + return tempLo; +} + +/* multiply 3 by 3 matrices, held as unsigned ints but computed in doubles, modulo m */ + +QUALIFIERS void curand_MRGmatMul3x3(unsigned int i1[][3],unsigned int i2[][3],unsigned int o[][3],double m) +{ + int i,j; + double temp[3][3]; + for (i=0; i<3; i++){ + for (j=0; j<3; j++){ + temp[i][j] = ( curand_MRGmodMul(i1[i][0], i2[0][j], m) + + curand_MRGmodMul(i1[i][1], i2[1][j], m) + + curand_MRGmodMul(i1[i][2], i2[2][j], m)); + temp[i][j] = curand_MRGmod( temp[i][j], m ); + } + } + for (i=0; i<3; i++){ + for (j=0; j<3; j++){ + o[i][j] = (unsigned int)temp[i][j]; + } + } +} + +/* multiply 3 by 3 matrix times 3 by 1 vector, computed in doubles, modulo m */ + +QUALIFIERS void curand_MRGmatVecMul3x3( unsigned int i[][3], unsigned int v[], double m) +{ + int k; + double t[3]; + for (k = 0; k < 3; k++) { + t[k] = ( curand_MRGmodMul(i[k][0], v[0], m) + + curand_MRGmodMul(i[k][1], v[1], m) + + curand_MRGmodMul(i[k][2], v[2], m) ); + t[k] = curand_MRGmod( t[k], m ); + } + for (k = 0; k < 3; k++) { + v[k] = (unsigned int)t[k]; + } + +} + +/* raise a 3 by 3 matrix to a 64 bit integer power pow, modulo m */ +/* input is index zero of an array of 3 by 3 matrices m, */ +/* each m = m[0]**(2**index) */ + +QUALIFIERS void curand_MRGmatPow3x3( unsigned int in[][3][3], unsigned int o[][3], double m, unsigned long long pow ) +{ + int i,j; + for ( i = 0; i < 3; i++ ) { + for ( j = 0; j < 3; j++ ) { + o[i][j] = 0; + if ( i == j ) o[i][j] = 1; + } + } + i = 0; + curand_MRGmatVecMul3x3(o,o[0],m); + while (pow) { + if ( pow & 1ll ) { + curand_MRGmatMul3x3(in[i], o, o, m); + } + i++; + pow >>= 1; + } +} + +/* raise a 3 by 3 matrix of doubles to the power */ +/* 2 to the power (pow modulo 191), modulo m */ + +QUALIFIERS void curand_MRGmatPow2Pow3x3( double in[][3], double o[][3], double m, unsigned long pow ) +{ + unsigned int temp[3][3]; + int i,j; + pow = pow % 191; + for ( i = 0; i < 3; i++ ) { + for ( j = 0; j < 3; j++ ) { + temp[i][j] = (unsigned int)in[i][j]; + } + } + while (pow) { + curand_MRGmatMul3x3(temp, temp, temp, m); + pow--; + } + for ( i = 0; i < 3; i++ ) { + for ( j = 0; j < 3; j++ ) { + o[i][j] = temp[i][j]; + } + } +}
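+ +/* Worked example for curand_MRGmodMul (illustrative reasoning only, not additional API): + * with i, j < 2**32 and m < 2**32, the split i = hi*131072 + lo (base 2**17) gives + * hi < 2**15 and lo < 2**17, so hi*j < 2**47 and lo*j < 2**49; after reducing + * hi*j mod m (< 2**32), the rescaled term (hi*j mod m)*131072 stays below 2**49. + * Every intermediate therefore fits in the 53-bit exact-integer range of a double, + * and i*j mod m is computed without rounding error. + */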
+ +/** \endcond */ + +/****************************************************************************/ +/* Kernel implementations of RNGs */ +/****************************************************************************/ + +/* Test RNG */ + +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateTest_t *state) +{ + state->v = (unsigned int)(seed * 3) + (unsigned int)(subsequence * 31337) + \ + (unsigned int)offset; +} + + +QUALIFIERS unsigned int curand(curandStateTest_t *state) +{ + unsigned int r = state->v++; + return r; +} + +QUALIFIERS void skipahead(unsigned long long n, curandStateTest_t *state) +{ + state->v += (unsigned int)n; +} + +/* XORWOW RNG */ + +template <typename T, int n> +QUALIFIERS void __curand_generate_skipahead_matrix_xor(unsigned int matrix[]) +{ + T state; + // Generate matrix that advances one step + // matrix has n * n * 32 32-bit elements + // solve for matrix by stepping single bit states + for(int i = 0; i < 32 * n; i++) { + state.d = 0; + for(int j = 0; j < n; j++) { + state.v[j] = 0; + } + state.v[i / 32] = (1 << (i & 31)); + curand(&state); + for(int j = 0; j < n; j++) { + matrix[i * n + j] = state.v[j]; + } + } +} + +template <typename T, int n> +QUALIFIERS void _skipahead_scratch(unsigned long long x, T *state, unsigned int *scratch) +{ + // unsigned int matrix[n * n * 32]; + unsigned int *matrix = scratch; + // unsigned int matrixA[n * n * 32]; + unsigned int *matrixA = scratch + (n * n * 32); + // unsigned int vector[n]; + unsigned int *vector = scratch + (n * n * 32) + (n * n * 32); + // unsigned int result[n]; + unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n; + unsigned long long p = x; + for(int i = 0; i < n; i++) { + vector[i] = state->v[i]; + } + int matrix_num = 0; + while(p && (matrix_num < PRECALC_NUM_MATRICES - 1)) { + for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec(vector, precalc_xorwow_offset_matrix[matrix_num], result, n); +, + __curand_matvec(vector, precalc_xorwow_offset_matrix_host[matrix_num], result, n); +) + __curand_veccopy(vector, result, n); + } + p >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + if(p) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matcopy(matrix, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n); +, + __curand_matcopy(matrix, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n); +) + } + while(p) { + for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) { + __curand_matvec(vector, matrixA, result, n); + __curand_veccopy(vector, result, n); + } + p >>= SKIPAHEAD_BLOCKSIZE; + if(p) { + for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) { + __curand_matmat(matrix, matrixA, n); + __curand_matcopy(matrixA, matrix, n); + } + } + } + for(int i = 0; i < n; i++) { + state->v[i] = vector[i]; + } + state->d += 362437 * (unsigned int)x; +} + +template <typename T, int n> +QUALIFIERS void _skipahead_sequence_scratch(unsigned long long x, T *state, unsigned int *scratch) +{ + // unsigned int matrix[n * n * 32]; + unsigned int *matrix = scratch; + // unsigned int matrixA[n * n * 32]; + unsigned int *matrixA = scratch + (n * n * 32); + // unsigned int vector[n]; + unsigned int *vector = scratch + (n * n * 32) + (n * n * 32); + // unsigned int result[n]; + unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n; + unsigned long long p = x; + for(int i = 0; i < n; i++) { + vector[i] = state->v[i]; + } + int matrix_num = 0; + while(p && matrix_num < PRECALC_NUM_MATRICES - 1) { + for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec(vector, precalc_xorwow_matrix[matrix_num], result, n); +, + __curand_matvec(vector, precalc_xorwow_matrix_host[matrix_num], result, n); +) + __curand_veccopy(vector, result, n); + } + p >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + if(p) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matcopy(matrix, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n); +, + __curand_matcopy(matrix, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n); +) + } + while(p) { + for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) { + __curand_matvec(vector, matrixA, result, n); + __curand_veccopy(vector, 
result, n); + } + p >>= SKIPAHEAD_BLOCKSIZE; + if(p) { + for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) { + __curand_matmat(matrix, matrixA, n); + __curand_matcopy(matrixA, matrix, n); + } + } + } + for(int i = 0; i < n; i++) { + state->v[i] = vector[i]; + } + /* No update of state->d needed, guaranteed to be a multiple of 2^32 */ +} + +template <typename T, int n> +QUALIFIERS void _skipahead_inplace(const unsigned long long x, T *state) +{ + unsigned long long p = x; + int matrix_num = 0; + while(p) { + for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec_inplace<n>(state->v, precalc_xorwow_offset_matrix[matrix_num]); +, + __curand_matvec_inplace<n>(state->v, precalc_xorwow_offset_matrix_host[matrix_num]); +) + } + p >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + state->d += 362437 * (unsigned int)x; +} + +template <typename T, int n> +QUALIFIERS void _skipahead_sequence_inplace(unsigned long long x, T *state) +{ + int matrix_num = 0; + while(x) { + for(unsigned int t = 0; t < (x & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec_inplace<n>(state->v, precalc_xorwow_matrix[matrix_num]); +, + __curand_matvec_inplace<n>(state->v, precalc_xorwow_matrix_host[matrix_num]); +) + } + x >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + /* No update of state->d needed, guaranteed to be a multiple of 2^32 */ +} + +/** + * \brief Update XORWOW state to skip \p n elements. + * + * Update the XORWOW state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead(unsigned long long n, curandStateXORWOW_t *state) +{ + _skipahead_inplace<curandStateXORWOW_t, 5>(n, state); +}
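+ +/* Usage sketch (illustrative only; the kernel name and launch shape are hypothetical): + * resume a stream after some draws were consumed elsewhere. + * + * __global__ void resume_draws(curandState_t *states, unsigned int *out, + * unsigned long long already_drawn) + * { + * int id = blockIdx.x * blockDim.x + threadIdx.x; + * curandState_t s = states[id]; + * skipahead(already_drawn, &s); // jump over the consumed values + * out[id] = curand(&s); // next value in the same stream + * states[id] = s; // write the advanced state back + * } + */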
+ +/** + * \brief Update XORWOW state to skip ahead \p n subsequences. + * + * Update the XORWOW state in \p state to skip ahead \p n subsequences. Each + * subsequence is 2^67 elements long, so this means the function will skip ahead + * 2^67 * n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of subsequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateXORWOW_t *state) +{ + _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(n, state); +} + +QUALIFIERS void _curand_init_scratch(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateXORWOW_t *state, + unsigned int *scratch) +{ + // Break up seed, apply salt + // Constants are arbitrary nonzero values + unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL; + unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL; + // Simple multiplication to mix up bits + // Constants are arbitrary odd values + unsigned int t0 = 1099087573UL * s0; + unsigned int t1 = 2591861531UL * s1; + state->d = 6615241 + t1 + t0; + state->v[0] = 123456789UL + t0; + state->v[1] = 362436069UL ^ t0; + state->v[2] = 521288629UL + t1; + state->v[3] = 88675123UL ^ t1; + state->v[4] = 5783321UL + t0; + _skipahead_sequence_scratch<curandStateXORWOW_t, 5>(subsequence, state, scratch); + _skipahead_scratch<curandStateXORWOW_t, 5>(offset, state, scratch); + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; +} + +QUALIFIERS void _curand_init_inplace(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateXORWOW_t *state) +{ + // Break up seed, apply salt + // Constants are arbitrary nonzero values + unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL; + unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL; + // Simple multiplication to mix up bits + // Constants are arbitrary odd values + unsigned int t0 = 1099087573UL * s0; + unsigned int t1 = 2591861531UL * s1; + state->d = 6615241 + t1 + t0; + state->v[0] = 123456789UL + t0; + state->v[1] = 362436069UL ^ t0; + state->v[2] = 521288629UL + t1; + state->v[3] = 88675123UL ^ t1; + state->v[4] = 5783321UL + t0; + _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(subsequence, state); + _skipahead_inplace<curandStateXORWOW_t, 5>(offset, state); + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; +} + +/** + * \brief Initialize XORWOW state. + * + * Initialize XORWOW state in \p state with the given \p seed, \p subsequence, + * and \p offset. + * + * All input values of \p seed, \p subsequence, and \p offset are legal. Large + * values for \p subsequence and \p offset require more computation and so will + * take more time to complete. + * + * A value of 0 for \p seed sets the state to the values of the original + * published version of the \p xorwow algorithm. + * + * \param seed - Arbitrary bits to use as a seed + * \param subsequence - Subsequence to start at + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateXORWOW_t *state) +{ + _curand_init_inplace(seed, subsequence, offset, state); +} + +/** + * \brief Return 32-bits of pseudorandomness from an XORWOW generator. + * + * Return 32-bits of pseudorandomness from the XORWOW generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use. 
+ */ +QUALIFIERS unsigned int curand(curandStateXORWOW_t *state) +{ + unsigned int t; + t = (state->v[0] ^ (state->v[0] >> 2)); + state->v[0] = state->v[1]; + state->v[1] = state->v[2]; + state->v[2] = state->v[3]; + state->v[3] = state->v[4]; + state->v[4] = (state->v[4] ^ (state->v[4] << 4)) ^ (t ^ (t << 1)); + state->d += 362437; + return state->v[4] + state->d; +} + + +/** + * \brief Return 32-bits of pseudorandomness from a Philox4_32_10 generator. + * + * Return 32-bits of pseudorandomness from the Philox4_32_10 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use. + */ + +QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state) +{ + // Maintain the invariant: output[STATE] is always "good" and + // is the next value to be returned by curand. + unsigned int ret; + switch(state->STATE++){ + default: + ret = state->output.x; + break; + case 1: + ret = state->output.y; + break; + case 2: + ret = state->output.z; + break; + case 3: + ret = state->output.w; + break; + } + if(state->STATE == 4){ + Philox_State_Incr(state); + state->output = curand_Philox4x32_10(state->ctr,state->key); + state->STATE = 0; + } + return ret; +} + +/** + * \brief Return a tuple of 4 32-bit pseudorandoms from a Philox4_32_10 generator. + * + * Return 128 bits of pseudorandomness from the Philox4_32_10 generator in \p state, + * increment position of generator by four. + * + * \param state - Pointer to state to update + * + * \return 128-bits of pseudorandomness as a uint4, all bits valid to use. + */ + +QUALIFIERS uint4 curand4(curandStatePhilox4_32_10_t *state) +{ + uint4 r; + + uint4 tmp = state->output; + Philox_State_Incr(state); + state->output= curand_Philox4x32_10(state->ctr,state->key); + switch(state->STATE){ + case 0: + return tmp; + case 1: + r.x = tmp.y; + r.y = tmp.z; + r.z = tmp.w; + r.w = state->output.x; + break; + case 2: + r.x = tmp.z; + r.y = tmp.w; + r.z = state->output.x; + r.w = state->output.y; + break; + case 3: + r.x = tmp.w; + r.y = state->output.x; + r.z = state->output.y; + r.w = state->output.z; + break; + default: + // NOT possible but needed to avoid compiler warnings + return tmp; + } + return r; +} + +/** + * \brief Update Philox4_32_10 state to skip \p n elements. + * + * Update the Philox4_32_10 state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead(unsigned long long n, curandStatePhilox4_32_10_t *state) +{ + state->STATE += (n & 3); + n /= 4; + if( state->STATE > 3 ){ + n += 1; + state->STATE -= 4; + } + Philox_State_Incr(state, n); + state->output = curand_Philox4x32_10(state->ctr,state->key); +}
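+ +/* Usage sketch (illustrative only; kernel name is hypothetical): curand4() yields + * four 32-bit values per call, which is the natural granularity for Philox. + * + * __global__ void draw4(curandStatePhilox4_32_10_t *states, uint4 *out) + * { + * int id = blockIdx.x * blockDim.x + threadIdx.x; + * curandStatePhilox4_32_10_t s = states[id]; + * out[id] = curand4(&s); // one counter step, four outputs + * states[id] = s; + * } + */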
+ +/** + * \brief Update Philox4_32_10 state to skip ahead \p n subsequences. + * + * Update the Philox4_32_10 state in \p state to skip ahead \p n subsequences. Each + * subsequence is 2^66 elements long, so this means the function will skip ahead + * 2^66 * n elements. + * + * All values of \p n are valid. + * + * \param n - Number of subsequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_sequence(unsigned long long n, curandStatePhilox4_32_10_t *state) +{ + Philox_State_Incr_hi(state, n); + state->output = curand_Philox4x32_10(state->ctr,state->key); +} + +/** + * \brief Initialize Philox4_32_10 state. + * + * Initialize Philox4_32_10 state in \p state with the given \p seed, \p subsequence, + * and \p offset. + * + * All input values for \p seed, \p subsequence and \p offset are legal. Each of the + * 2^64 possible + * values of seed selects an independent sequence of length + * 2^130. + * The first + * 2^66 * subsequence + offset + * values of the sequence are skipped. + * I.e., subsequences are of length + * 2^66. + * + * \param seed - Arbitrary bits to use as a seed + * \param subsequence - Subsequence to start at + * \param offset - Absolute offset into subsequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStatePhilox4_32_10_t *state) +{ + state->ctr = make_uint4(0, 0, 0, 0); + state->key.x = (unsigned int)seed; + state->key.y = (unsigned int)(seed>>32); + state->STATE = 0; + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; + skipahead_sequence(subsequence, state); + skipahead(offset, state); +} + + +/* MRG32k3a RNG */ + +/* Base generator for MRG32k3a */ +QUALIFIERS unsigned long long __curand_umad(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b, GCC_UNUSED_PARAMETER unsigned long long c) +{ + unsigned long long r = 0; +NV_IF_TARGET(NV_PROVIDES_SM_61, + asm("mad.wide.u32 %0, %1, %2, %3;" + : "=l"(r) : "r"(a), "r"(b), "l"(c)); +) + return r; +} +QUALIFIERS unsigned long long __curand_umul(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b) +{ + unsigned long long r = 0; +NV_IF_TARGET(NV_PROVIDES_SM_61, + asm("mul.wide.u32 %0, %1, %2;" + : "=l"(r) : "r"(a), "r"(b)); +) + return r; +} +QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state) +{ +NV_IF_TARGET(NV_PROVIDES_SM_61, + const unsigned int m1 = 4294967087u; + const unsigned int m2 = 4294944443u; + const unsigned int m1c = 209u; + const unsigned int m2c = 22853u; + const unsigned int a12 = 1403580u; + const unsigned int a13n = 810728u; + const unsigned int a21 = 527612u; + const unsigned int a23n = 1370589u; + + unsigned long long p1; + unsigned long long p2; + const unsigned long long p3 = __curand_umul(a13n, m1 - state->s1[0]); + p1 = __curand_umad(a12, state->s1[1], p3); + + // Putting addition inside and changing umul to umad + // slowed this function down on GV100 + p1 = __curand_umul(p1 >> 32, m1c) + (p1 & 0xffffffff); + if (p1 >= m1) p1 -= m1; + + state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = p1; + const unsigned long long p4 = __curand_umul(a23n, m2 - state->s2[0]); + p2 = __curand_umad(a21, state->s2[2], p4); + + // Putting addition inside and changing umul to umad + // slowed this function down on GV100 + p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff); + p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff); + if (p2 >= m2) p2 -= m2; + + state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = p2; + + const unsigned int p5 = (unsigned int)p1 - (unsigned int)p2; + if(p1 <= p2) return p5 
+ m1; + return p5; +) +NV_IF_TARGET(NV_IS_DEVICE, +/* nj's implementation */ + const double m1 = 4294967087.; + const double m2 = 4294944443.; + const double a12 = 1403580.; + const double a13n = 810728.; + const double a21 = 527612.; + const double a23n = 1370589.; + + const double rh1 = 2.3283065498378290e-010; /* (1.0 / m1)__hi */ + const double rl1 = -1.7354913086174288e-026; /* (1.0 / m1)__lo */ + const double rh2 = 2.3283188252407387e-010; /* (1.0 / m2)__hi */ + const double rl2 = 2.4081018096503646e-026; /* (1.0 / m2)__lo */ + + double q; + double p1; + double p2; + p1 = a12 * state->s1[1] - a13n * state->s1[0]; + q = trunc (fma (p1, rh1, p1 * rl1)); + p1 -= q * m1; + if (p1 < 0.0) p1 += m1; + state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = (unsigned int)p1; + p2 = a21 * state->s2[2] - a23n * state->s2[0]; + q = trunc (fma (p2, rh2, p2 * rl2)); + p2 -= q * m2; + if (p2 < 0.0) p2 += m2; + state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = (unsigned int)p2; + if (p1 <= p2) return (p1 - p2 + m1); + else return (p1 - p2); +) +/* end nj's implementation */ + double p1; + double p2; + double r; + p1 = (MRG32K3A_A12 * state->s1[1]) - (MRG32K3A_A13N * state->s1[0]); + p1 = curand_MRGmod(p1, MRG32K3A_MOD1); + if (p1 < 0.0) p1 += MRG32K3A_MOD1; + state->s1[0] = state->s1[1]; + state->s1[1] = state->s1[2]; + state->s1[2] = (unsigned int)p1; + p2 = (MRG32K3A_A21 * state->s2[2]) - (MRG32K3A_A23N * state->s2[0]); + p2 = curand_MRGmod(p2, MRG32K3A_MOD2); + if (p2 < 0.0) p2 += MRG32K3A_MOD2; + state->s2[0] = state->s2[1]; + state->s2[1] = state->s2[2]; + state->s2[2] = (unsigned int)p2; + r = p1 - p2; + if (r <= 0) r += MRG32K3A_MOD1; + return r; +} + + +/** + * \brief Return 32-bits of pseudorandomness from an MRG32k3a generator. + * + * Return 32-bits of pseudorandomness from the MRG32k3a generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use. + */ +QUALIFIERS unsigned int curand(curandStateMRG32k3a_t *state) +{ + double dRet; + dRet = (double)curand_MRG32k3a(state)*(double)MRG32K3A_BITS_NORM; + return (unsigned int)dRet; +} + + + +/** + * \brief Update MRG32k3a state to skip \p n elements. + * + * Update the MRG32k3a state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead(unsigned long long n, curandStateMRG32k3a_t *state) +{ + unsigned int t[3][3]; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + curand_MRGmatPow3x3( mrg32k3aM1, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3(mrg32k3aM2, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +, + curand_MRGmatPow3x3( mrg32k3aM1Host, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3(mrg32k3aM2Host, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +) +} + +/** + * \brief Update MRG32k3a state to skip ahead \p n subsequences. + * + * Update the MRG32k3a state in \p state to skip ahead \p n subsequences. Each + * subsequence is 2^76 elements long (each sequence of 2^127 elements contains + * 2^51 subsequences), so this means the function will skip ahead + * 2^76 * n elements. 
+ * + * Valid values of \p n are 0 to 2^51. Note \p n will be masked to 51 bits. + * + * \param n - Number of subsequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_subsequence(unsigned long long n, curandStateMRG32k3a_t *state) +{ + unsigned int t[3][3]; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + curand_MRGmatPow3x3( mrg32k3aM1SubSeq, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2SubSeq, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +, + curand_MRGmatPow3x3( mrg32k3aM1SubSeqHost, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2SubSeqHost, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +) +} + +/** + * \brief Update MRG32k3a state to skip ahead \p n sequences. + * + * Update the MRG32k3a state in \p state to skip ahead \p n sequences. Each + * sequence is 2^127 elements long, so this means the function will skip ahead + * 2^127 * n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of sequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateMRG32k3a_t *state) +{ + unsigned int t[3][3]; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + curand_MRGmatPow3x3( mrg32k3aM1Seq, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2Seq, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +, + curand_MRGmatPow3x3( mrg32k3aM1SeqHost, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2SeqHost, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +) +}
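+ +/* Usage sketch (illustrative only; kernel name is hypothetical): curand_init() + * below already places each thread in its own subsequence, so the skipahead + * variants are mostly needed when resuming or partitioning streams by hand. + * + * __global__ void setup_mrg(curandStateMRG32k3a_t *states, unsigned long long seed) + * { + * int id = blockIdx.x * blockDim.x + threadIdx.x; + * curand_init(seed, id, 0, &states[id]); // one subsequence per thread + * } + */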
+ +/** + * \brief Initialize MRG32k3a state. + * + * Initialize MRG32k3a state in \p state with the given \p seed, \p subsequence, + * and \p offset. + * + * All input values of \p seed, \p subsequence, and \p offset are legal. + * \p subsequence will be truncated to 51 bits to avoid running into the next sequence. + * + * A value of 0 for \p seed sets the state to the values of the original + * published version of the \p MRG32k3a algorithm. + * + * \param seed - Arbitrary bits to use as a seed + * \param subsequence - Subsequence to start at + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateMRG32k3a_t *state) +{ + int i; + for ( i=0; i<3; i++ ) { + state->s1[i] = 12345u; + state->s2[i] = 12345u; + } + if (seed != 0ull) { + unsigned int x1 = ((unsigned int)seed) ^ 0x55555555UL; + unsigned int x2 = (unsigned int)((seed >> 32) ^ 0xAAAAAAAAUL); + state->s1[0] = (unsigned int)curand_MRGmodMul(x1, state->s1[0], MRG32K3A_MOD1); + state->s1[1] = (unsigned int)curand_MRGmodMul(x2, state->s1[1], MRG32K3A_MOD1); + state->s1[2] = (unsigned int)curand_MRGmodMul(x1, state->s1[2], MRG32K3A_MOD1); + state->s2[0] = (unsigned int)curand_MRGmodMul(x2, state->s2[0], MRG32K3A_MOD2); + state->s2[1] = (unsigned int)curand_MRGmodMul(x1, state->s2[1], MRG32K3A_MOD2); + state->s2[2] = (unsigned int)curand_MRGmodMul(x2, state->s2[2], MRG32K3A_MOD2); + } + skipahead_subsequence( subsequence, state ); + skipahead( offset, state ); + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; +} + +/** + * \brief Update Sobol32 state to skip \p n elements. + * + * Update the Sobol32 state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +template <typename T> +QUALIFIERS +typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol32_t *, T>::value || CURAND_STD::is_same<curandStateScrambledSobol32_t *, T>::value>::type +skipahead(unsigned int n, T state) +{ + unsigned int i_gray; + state->x = state->c; + state->i += n; + /* Convert state->i to gray code */ + i_gray = state->i ^ (state->i >> 1); + for(unsigned int k = 0; k < 32; k++) { + if(i_gray & (1 << k)) { + state->x ^= state->direction_vectors[k]; + } + } + return; +} + +/** + * \brief Update Sobol64 state to skip \p n elements. + * + * Update the Sobol64 state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +template <typename T> +QUALIFIERS +typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol64_t *, T>::value || CURAND_STD::is_same<curandStateScrambledSobol64_t *, T>::value>::type +skipahead(unsigned long long n, T state) +{ + unsigned long long i_gray; + state->x = state->c; + state->i += n; + /* Convert state->i to gray code */ + i_gray = state->i ^ (state->i >> 1); + for(unsigned k = 0; k < 64; k++) { + if(i_gray & (1ULL << k)) { + state->x ^= state->direction_vectors[k]; + } + } + return; +} + +/** + * \brief Initialize Sobol32 state. + * + * Initialize Sobol32 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 32 unsigned ints. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 32 unsigned ints representing the + * direction vectors for the desired dimension + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors, + unsigned int offset, + curandStateSobol32_t *state) +{ + state->i = 0; + state->c = 0; + for(int i = 0; i < 32; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = 0; + skipahead(offset, state); +}
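+ +/* Usage sketch (host side, illustrative only): the direction vectors come from the + * cuRAND host library, one set of 32 unsigned ints per dimension. + * curandGetDirectionVectors32() and CURAND_DIRECTION_VECTORS_32_JOEKUO6 are part of + * the host API declared in curand.h; variable names here are hypothetical. + * + * curandDirectionVectors32_t *h_vectors; + * curandGetDirectionVectors32(&h_vectors, CURAND_DIRECTION_VECTORS_32_JOEKUO6); + * // copy h_vectors[dim] (32 unsigned ints) to the device, then in a kernel: + * // curand_init(d_vectors[dim], offset, &state); + */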
+ +/** + * \brief Initialize Scrambled Sobol32 state. + * + * Initialize the scrambled Sobol32 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 32 unsigned ints. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 32 unsigned ints representing the + * direction vectors for the desired dimension + * \param scramble_c - Scramble constant + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors, + unsigned int scramble_c, + unsigned int offset, + curandStateScrambledSobol32_t *state) +{ + state->i = 0; + state->c = scramble_c; + for(int i = 0; i < 32; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = state->c; + skipahead(offset, state); +} + +QUALIFIERS int __curand_find_trailing_zero(unsigned int x) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + int y = __ffs(~x); + if(y) + return y - 1; + return 31; +, + int i = 1; + while(x & 1) { + i++; + x >>= 1; + } + i = i - 1; + return i == 32 ? 31 : i; +) +} + +QUALIFIERS int __curand_find_trailing_zero(unsigned long long x) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + int y = __ffsll(~x); + if(y) + return y - 1; + return 63; +, + int i = 1; + while(x & 1) { + i++; + x >>= 1; + } + i = i - 1; + return i == 64 ? 63 : i; +) +} + +/** + * \brief Initialize Sobol64 state. + * + * Initialize Sobol64 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 64 unsigned long longs. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the + * direction vectors for the desired dimension + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors, + unsigned long long offset, + curandStateSobol64_t *state) +{ + state->i = 0; + state->c = 0; + for(int i = 0; i < 64; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = 0; + skipahead(offset, state); +} + +/** + * \brief Initialize Scrambled Sobol64 state. + * + * Initialize the scrambled Sobol64 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 64 unsigned long longs. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the + * direction vectors for the desired dimension + * \param scramble_c - Scramble constant + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors, + unsigned long long scramble_c, + unsigned long long offset, + curandStateScrambledSobol64_t *state) +{ + state->i = 0; + state->c = scramble_c; + for(int i = 0; i < 64; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = state->c; + skipahead(offset, state); +} + +/** + * \brief Return 32-bits of quasirandomness from a Sobol32 generator. + * + * Return 32-bits of quasirandomness from the Sobol32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use. 
+ */ + +QUALIFIERS unsigned int curand(curandStateSobol32_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned int res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +/** + * \brief Return 32-bits of quasirandomness from a scrambled Sobol32 generator. + * + * Return 32-bits of quasirandomness from the scrambled Sobol32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use. + */ + +QUALIFIERS unsigned int curand(curandStateScrambledSobol32_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned int res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +/** + * \brief Return 64-bits of quasirandomness from a Sobol64 generator. + * + * Return 64-bits of quasirandomness from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use. + */ + +QUALIFIERS unsigned long long curand(curandStateSobol64_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned long long res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +/** + * \brief Return 64-bits of quasirandomness from a scrambled Sobol64 generator. + * + * Return 64-bits of quasirandomness from the scrambled Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use. 
+ */ + +QUALIFIERS unsigned long long curand(curandStateScrambledSobol64_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned long long res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +#include "curand_uniform.h" +#include "curand_normal.h" +#include "curand_lognormal.h" +#include "curand_poisson.h" +#include "curand_discrete2.h" + +__device__ static inline unsigned int *__get_precalculated_matrix(int n) +{ + if(n == 0) { + return precalc_xorwow_matrix[n]; + } + if(n == 2) { + return precalc_xorwow_offset_matrix[n]; + } + return precalc_xorwow_matrix[n]; +} + +#ifndef __CUDACC_RTC__ +__host__ static inline unsigned int *__get_precalculated_matrix_host(int n) +{ + if(n == 1) { + return precalc_xorwow_matrix_host[n]; + } + if(n == 3) { + return precalc_xorwow_offset_matrix_host[n]; + } + return precalc_xorwow_matrix_host[n]; +} +#endif // #ifndef __CUDACC_RTC__ + +__device__ static inline unsigned int *__get_mrg32k3a_matrix(int n) +{ + if(n == 0) { + return mrg32k3aM1[n][0]; + } + if(n == 2) { + return mrg32k3aM2[n][0]; + } + if(n == 4) { + return mrg32k3aM1SubSeq[n][0]; + } + if(n == 6) { + return mrg32k3aM2SubSeq[n][0]; + } + if(n == 8) { + return mrg32k3aM1Seq[n][0]; + } + if(n == 10) { + return mrg32k3aM2Seq[n][0]; + } + return mrg32k3aM1[n][0]; +} + +#ifndef __CUDACC_RTC__ +__host__ static inline unsigned int *__get_mrg32k3a_matrix_host(int n) +{ + if(n == 1) { + return mrg32k3aM1Host[n][0]; + } + if(n == 3) { + return mrg32k3aM2Host[n][0]; + } + if(n == 5) { + return mrg32k3aM1SubSeqHost[n][0]; + } + if(n == 7) { + return mrg32k3aM2SubSeqHost[n][0]; + } + if(n == 9) { + return mrg32k3aM1SeqHost[n][0]; + } + if(n == 11) { + return mrg32k3aM2SeqHost[n][0]; + } + return mrg32k3aM1Host[n][0]; +} + +__host__ static inline double *__get__cr_lgamma_table_host(void) { + return __cr_lgamma_table; +} +#endif // #ifndef __CUDACC_RTC__ + +/** @} */ + +#endif // !defined(CURAND_KERNEL_H_) diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h new file mode 100644 index 0000000000000000000000000000000000000000..2ebb27599e54cef8325e0d43457f09283d13cb88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h @@ -0,0 +1,697 @@ + + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + +#if !defined(CURAND_LOGNORMAL_H_) +#define CURAND_LOGNORMAL_H_ + +/** + * \defgroup DEVICE Device API + * + * @{ + */ + +#ifndef __CUDACC_RTC__ +#include <math.h> +#endif // __CUDACC_RTC__ + +#include "curand_mrg32k3a.h" +#include "curand_mtgp32_kernel.h" +#include "curand_philox4x32_x.h" + +/** + * \brief Return a log-normally distributed float from an XORWOW generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the XORWOW generator in \p state, + * increment position of generator by one. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, transforms them to log-normal distribution, + * then returns them one at a time. + * See ::curand_log_normal2() for a more efficient version that returns + * both results at once. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateXORWOW_t *state, float mean, float stddev) +{ + if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) { + unsigned int x, y; + x = curand(state); + y = curand(state); + float2 v = _curand_box_muller(x, y); + state->boxmuller_extra = expf(mean + (stddev * v.y)); + state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL; + return expf(mean + (stddev * v.x)); + } + state->boxmuller_flag = 0; + return state->boxmuller_extra; +}
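+ +/* Usage sketch (illustrative only; kernel name is hypothetical): \p mean and + * \p stddev parameterize the underlying normal distribution, not the log-normal + * output itself. + * + * __global__ void lognormal_draws(curandState_t *states, float *out) + * { + * int id = blockIdx.x * blockDim.x + threadIdx.x; + * curandState_t s = states[id]; + * out[id] = curand_log_normal(&s, 0.0f, 1.0f); // ln(out) ~ N(0, 1) + * states[id] = s; + * } + */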
+ +/** + * \brief Return a log-normally distributed float from a Philox4_32_10 generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Philox4_32_10 generator in \p state, + * increment position of generator by one. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, transforms them to log-normal distribution, + * then returns them one at a time. + * See ::curand_log_normal2() for a more efficient version that returns + * both results at once. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ + +QUALIFIERS float curand_log_normal(curandStatePhilox4_32_10_t *state, float mean, float stddev) +{ + if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) { + unsigned int x, y; + x = curand(state); + y = curand(state); + float2 v = _curand_box_muller(x, y); + state->boxmuller_extra = expf(mean + (stddev * v.y)); + state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL; + return expf(mean + (stddev * v.x)); + } + state->boxmuller_flag = 0; + return state->boxmuller_extra; +} + +/** + * \brief Return two log-normally distributed floats from an XORWOW generator. + * + * Return two log-normally distributed floats derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the XORWOW generator in \p state, + * increment position of generator by two. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, then transforms them to log-normal. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float2 where each element is from a + * distribution with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float2 curand_log_normal2(curandStateXORWOW_t *state, float mean, float stddev) +{ + float2 v = curand_box_muller(state); + v.x = expf(mean + (stddev * v.x)); + v.y = expf(mean + (stddev * v.y)); + return v; +} + +/** + * \brief Return two log-normally distributed floats from a Philox4_32_10 generator. + * + * Return two log-normally distributed floats derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Philox4_32_10 generator in \p state, + * increment position of generator by two. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, then transforms them to log-normal. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float2 where each element is from a + * distribution with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float2 curand_log_normal2(curandStatePhilox4_32_10_t *state, float mean, float stddev) +{ + float2 v = curand_box_muller(state); + v.x = expf(mean + (stddev * v.x)); + v.y = expf(mean + (stddev * v.y)); + return v; +}
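+ +/* Usage sketch (illustrative fragment; assumes s is an initialized + * curandStateXORWOW_t or curandStatePhilox4_32_10_t): when results are consumed + * in pairs, the two-output form avoids caching the second Box-Muller value in + * the generator state. + * + * float2 v = curand_log_normal2(&s, 0.0f, 1.0f); // two draws per call + * sum += v.x + v.y; + */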
+/** + * \brief Return four log-normally distributed floats from a Philox4_32_10 generator. + * + * Return four log-normally distributed floats derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Philox4_32_10 generator in \p state, + * increment position of generator by four. + * + * The implementation uses a Box-Muller transform to generate four + * normally distributed results, then transforms them to log-normal. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float4 where each element is from a + * distribution with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float4 curand_log_normal4(curandStatePhilox4_32_10_t *state, float mean, float stddev) +{ + float4 v = curand_box_muller4(state); + v.x = expf(mean + (stddev * v.x)); + v.y = expf(mean + (stddev * v.y)); + v.z = expf(mean + (stddev * v.z)); + v.w = expf(mean + (stddev * v.w)); + return v; +} + +/** + * \brief Return a log-normally distributed float from an MRG32k3a generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the MRG32k3a generator in \p state, + * increment position of generator by one. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, transforms them to log-normal distribution, + * then returns them one at a time. + * See ::curand_log_normal2() for a more efficient version that returns + * both results at once. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateMRG32k3a_t *state, float mean, float stddev) +{ + if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) { + float2 v = curand_box_muller_mrg(state); + state->boxmuller_extra = expf(mean + (stddev * v.y)); + state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL; + return expf(mean + (stddev * v.x)); + } + state->boxmuller_flag = 0; + return state->boxmuller_extra; +} + +/** + * \brief Return two log-normally distributed floats from an MRG32k3a generator. + * + * Return two log-normally distributed floats derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the MRG32k3a generator in \p state, + * increment position of generator by two. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, then transforms them to log-normal. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float2 where each element is from a + * distribution with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float2 curand_log_normal2(curandStateMRG32k3a_t *state, float mean, float stddev) +{ + float2 v = curand_box_muller_mrg(state); + v.x = expf(mean + (stddev * v.x)); + v.y = expf(mean + (stddev * v.y)); + return v; +} + +/** + * \brief Return a log-normally distributed float from an MTGP32 generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the MTGP32 generator in \p state, + * increment position of generator. + * + * The implementation uses the inverse cumulative distribution function + * to generate a normally distributed result, then transforms the result + * to log-normal. 
+ * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateMtgp32_t *state, float mean, float stddev) +{ + return expf(mean + (stddev * _curand_normal_icdf(curand(state)))); +} + +/** + * \brief Return a log-normally distributed float from a Sobol32 generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Sobol32 generator in \p state, + * increment position of generator by one. + * + * The implementation uses the inverse cumulative distribution function + * to generate a normally distributed result, then transforms the result + * to log-normal. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateSobol32_t *state, float mean, float stddev) +{ + return expf(mean + (stddev * _curand_normal_icdf(curand(state)))); +} +/** + * \brief Return a log-normally distributed float from a scrambled Sobol32 generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the scrambled Sobol32 generator in \p state, + * increment position of generator by one. + * + * The implementation uses the inverse cumulative distribution function + * to generate a normally distributed result, then transforms the result + * to log-normal. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateScrambledSobol32_t *state, float mean, float stddev) +{ + return expf(mean + (stddev * _curand_normal_icdf(curand(state)))); +} + +/** + * \brief Return a log-normally distributed float from a Sobol64 generator. + * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * The implementation uses the inverse cumulative distribution function + * to generate normally distributed results, then converts to log-normal + * distribution. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateSobol64_t *state, float mean, float stddev) +{ + return expf(mean + (stddev * _curand_normal_icdf(curand(state)))); +} + +/** + * \brief Return a log-normally distributed float from a scrambled Sobol64 generator. 
+ * + * Return a single log-normally distributed float derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the scrambled Sobol64 generator in \p state, + * increment position of generator by one. + * + * The implementation uses the inverse cumulative distribution function + * to generate normally distributed results, then converts to log-normal + * distribution. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS float curand_log_normal(curandStateScrambledSobol64_t *state, float mean, float stddev) +{ + return expf(mean + (stddev * _curand_normal_icdf(curand(state)))); +} + +/** + * \brief Return a log-normally distributed double from an XORWOW generator. + * + * Return a single log-normally distributed double derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the XORWOW generator in \p state, + * increment position of generator. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, transforms them to log-normal distribution, + * then returns them one at a time. + * See ::curand_log_normal2_double() for a more efficient version that returns + * both results at once. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev + */ + +QUALIFIERS double curand_log_normal_double(curandStateXORWOW_t *state, double mean, double stddev) +{ + if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) { + unsigned int x0, x1, y0, y1; + x0 = curand(state); + x1 = curand(state); + y0 = curand(state); + y1 = curand(state); + double2 v = _curand_box_muller_double(x0, x1, y0, y1); + state->boxmuller_extra_double = exp(mean + (stddev * v.y)); + state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL; + return exp(mean + (stddev * v.x)); + } + state->boxmuller_flag_double = 0; + return state->boxmuller_extra_double; +} + +/** + * \brief Return a log-normally distributed double from a Philox4_32_10 generator. + * + * Return a single log-normally distributed double derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Philox4_32_10 generator in \p state, + * increment position of generator. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, transforms them to log-normal distribution, + * then returns them one at a time. + * See ::curand_log_normal2_double() for a more efficient version that returns + * both results at once. 
+ * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev + */ + +QUALIFIERS double curand_log_normal_double(curandStatePhilox4_32_10_t *state, double mean, double stddev) +{ + if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) { + uint4 _x; + _x = curand4(state); + double2 v = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w); + state->boxmuller_extra_double = exp(mean + (stddev * v.y)); + state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL; + return exp(mean + (stddev * v.x)); + } + state->boxmuller_flag_double = 0; + return state->boxmuller_extra_double; +} + + +/** + * \brief Return two log-normally distributed doubles from an XORWOW generator. + * + * Return two log-normally distributed doubles derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the XORWOW generator in \p state, + * increment position of generator by two. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, and transforms them to the log-normal distribution. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed double2 where each element is from a + * distribution with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS double2 curand_log_normal2_double(curandStateXORWOW_t *state, double mean, double stddev) +{ + double2 v = curand_box_muller_double(state); + v.x = exp(mean + (stddev * v.x)); + v.y = exp(mean + (stddev * v.y)); + return v; +} + +/** + * \brief Return two log-normally distributed doubles from a Philox4_32_10 generator. + * + * Return two log-normally distributed doubles derived from a normal + * distribution with mean \p mean and standard deviation \p stddev + * from the Philox4_32_10 generator in \p state, + * increment position of generator by four. + * + * The implementation uses a Box-Muller transform to generate two + * normally distributed results, and transforms them to the log-normal distribution. + * + * \param state - Pointer to state to update + * \param mean - Mean of the related normal distribution + * \param stddev - Standard deviation of the related normal distribution + * + * \return Log-normally distributed double2 where each element is from a + * distribution with mean \p mean and standard deviation \p stddev + */ +QUALIFIERS double2 curand_log_normal2_double(curandStatePhilox4_32_10_t *state, double mean, double stddev) +{ + double2 v = curand_box_muller2_double(state); + v.x = exp(mean + (stddev * v.x)); + v.y = exp(mean + (stddev * v.y)); + return v; +} +// not part of the API +QUALIFIERS double4 curand_log_normal4_double(curandStatePhilox4_32_10_t *state, double mean, double stddev) +{ + double4 v = curand_box_muller4_double(state); + v.x = exp(mean + (stddev * v.x)); + v.y = exp(mean + (stddev * v.y)); + v.z = exp(mean + (stddev * v.z)); + v.w = exp(mean + (stddev * v.w)); + return v; +} + +/** + * \brief Return a log-normally distributed double from an MRG32k3a generator. 
+
+/**
+ * \brief Return a log-normally distributed double from an MRG32k3a generator.
+ *
+ * Return a single log-normally distributed double derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the MRG32k3a generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, transforms them to log-normal distribution,
+ * then returns them one at a time.
+ * See ::curand_log_normal2_double() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double curand_log_normal_double(curandStateMRG32k3a_t *state, double mean, double stddev)
+{
+    if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
+        double2 v = curand_box_muller_mrg_double(state);
+        state->boxmuller_extra_double = exp(mean + (stddev * v.y));
+        state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
+        return exp(mean + (stddev * v.x));
+    }
+    state->boxmuller_flag_double = 0;
+    return state->boxmuller_extra_double;
+}
+
+/**
+ * \brief Return two log-normally distributed doubles from an MRG32k3a generator.
+ *
+ * Return two log-normally distributed doubles derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the MRG32k3a generator in \p state,
+ * increment position of generator by two.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, and transforms them to log-normal distribution.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double2 where each element is from a
+ * distribution with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double2 curand_log_normal2_double(curandStateMRG32k3a_t *state, double mean, double stddev)
+{
+    double2 v = curand_box_muller_mrg_double(state);
+    v.x = exp(mean + (stddev * v.x));
+    v.y = exp(mean + (stddev * v.y));
+    return v;
+}
+
+/**
+ * \brief Return a log-normally distributed double from an MTGP32 generator.
+ *
+ * Return a single log-normally distributed double derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the MTGP32 generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results, and transforms them into
+ * log-normal distribution.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double curand_log_normal_double(curandStateMtgp32_t *state, double mean, double stddev)
+{
+    return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
+}
+
+/**
+ * \brief Return a log-normally distributed double from a Sobol32 generator.
+ *
+ * Return a single log-normally distributed double derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the Sobol32 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results, and transforms them into
+ * log-normal distribution.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double curand_log_normal_double(curandStateSobol32_t *state, double mean, double stddev)
+{
+    return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
+}
+
+/**
+ * \brief Return a log-normally distributed double from a scrambled Sobol32 generator.
+ *
+ * Return a single log-normally distributed double derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the scrambled Sobol32 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results, and transforms them into
+ * log-normal distribution.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol32_t *state, double mean, double stddev)
+{
+    return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
+}
+
+/**
+ * \brief Return a log-normally distributed double from a Sobol64 generator.
+ *
+ * Return a single log-normally distributed double derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the Sobol64 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results, and transforms them into
+ * log-normal distribution.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double curand_log_normal_double(curandStateSobol64_t *state, double mean, double stddev)
+{
+    return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
+}
+
+/**
+ * \brief Return a log-normally distributed double from a scrambled Sobol64 generator.
+ *
+ * Return a single log-normally distributed double derived from a normal
+ * distribution with mean \p mean and standard deviation \p stddev
+ * from the scrambled Sobol64 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results, and transforms them into
+ * log-normal distribution.
+ *
+ * \param state - Pointer to state to update
+ * \param mean - Mean of the related normal distribution
+ * \param stddev - Standard deviation of the related normal distribution
+ *
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
+ */
+QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol64_t *state, double mean, double stddev)
+{
+    return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
+}
+
+#endif // !defined(CURAND_LOGNORMAL_H_)
diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h
new file mode 100644
index 0000000000000000000000000000000000000000..7b9a2d31b314194e6999aeb7046db67b6c9e19ac
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2010-2014 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/*
+ * curand_mtgp32_host.h
+ *
+ *
+ * MTGP32-11213
+ *
+ * Mersenne Twister RNG for the GPU
+ *
+ * The period of generated integers is 2^11213-1.
+ *
+ * This code generates 32-bit unsigned integers, and
+ * single precision floating point numbers uniformly distributed
+ * in the range [1, 2). (float r; 1.0 <= r < 2.0)
+ */
+
+/*
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University.  All rights reserved.
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
+ * University and University of Tokyo.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of the Hiroshima University nor the names of
+ *       its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#if !defined CURAND_MTGP32_HOST_H
+#define CURAND_MTGP32_HOST_H
+
+#if !defined(QUALIFIERS)
+#define QUALIFIERS static inline __device__
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+#include "curand.h"
+#include "curand_mtgp32.h"
+#include "curand_mtgp32dc_p_11213.h"
+
+
+/**
+ * \addtogroup DEVICE Device API
+ *
+ * @{
+ */
+
+static const unsigned int non_zero = 0x4d544750;
+
+/*
+ * This function represents a function used in the initialization
+ * by mtgp32_init_by_array() and mtgp32_init_by_str().
+ * @param[in] x 32-bit integer
+ * @return 32-bit integer
+ */
+static __forceinline__ unsigned int ini_func1(unsigned int x) {
+    return (x ^ (x >> 27)) * (1664525);
+}
+
+/*
+ * This function represents a function used in the initialization
+ * by mtgp32_init_by_array() and mtgp32_init_by_str().
+ * @param[in] x 32-bit integer
+ * @return 32-bit integer
+ */
+static __forceinline__ unsigned int ini_func2(unsigned int x) {
+    return (x ^ (x >> 27)) * (1566083941);
+}
+
+/*
+ * This function initializes the internal state array with a 32-bit
+ * integer seed. The allocated memory should be freed by calling
+ * mtgp32_free(). \b para should be one of the elements in the
+ * parameter table (mtgp32-param-ref.c).
+ *
+ * This function is called by the CUDA program, because the CUDA program
+ * uses another structure and another allocation method.
+ *
+ * @param[out] state MTGP internal state vector.
+ * @param[in] para parameter structure
+ * @param[in] seed a 32-bit integer used as the seed.
+ */
+static __forceinline__ __host__
+void mtgp32_init_state(unsigned int state[],
+                       const mtgp32_params_fast_t *para, unsigned int seed) {
+    int i;
+    int size = para->mexp / 32 + 1;
+    unsigned int hidden_seed;
+    unsigned int tmp;
+    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
+    tmp = hidden_seed;
+    tmp += tmp >> 16;
+    tmp += tmp >> 8;
+    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
+    state[0] = seed;
+    state[1] = hidden_seed;
+    for (i = 1; i < size; i++) {
+        state[i] ^= (1812433253) * (state[i - 1] ^ (state[i - 1] >> 30)) + i;
+    }
+}
+
+/*
+ * This function initializes the internal state array
+ * with a 32-bit integer array. \b para should be one of the elements in
+ * the parameter table (mtgp32-param-ref.c).
+ *
+ * @param[out] state MTGP internal state array.
+ * @param[in] para parameter structure
+ * @param[in] array a 32-bit integer array used as a seed.
+ * @param[in] length length of the array.
+ * @return CURAND_STATUS_SUCCESS
+ */
+static __forceinline__ __host__
+int mtgp32_init_by_array(unsigned int state[],
+                         const mtgp32_params_fast_t *para,
+                         unsigned int *array, int length) {
+    int i, j, count;
+    unsigned int r;
+    int lag;
+    int mid;
+    int size = para->mexp / 32 + 1;
+    unsigned int hidden_seed;
+    unsigned int tmp;
+
+    if (size >= 623) {
+        lag = 11;
+    } else if (size >= 68) {
+        lag = 7;
+    } else if (size >= 39) {
+        lag = 5;
+    } else {
+        lag = 3;
+    }
+    mid = (size - lag) / 2;
+
+    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
+    tmp = hidden_seed;
+    tmp += tmp >> 16;
+    tmp += tmp >> 8;
+    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
+    state[0] = hidden_seed;
+
+    if (length + 1 > size) {
+        count = length + 1;
+    } else {
+        count = size;
+    }
+    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
+    state[mid] += r;
+    r += length;
+    state[(mid + lag) % size] += r;
+    state[0] = r;
+    i = 1;
+    count--;
+    for (i = 1, j = 0; (j < count) && (j < length); j++) {
+        r = ini_func1(state[i] ^ state[(i + mid) % size]
+                      ^ state[(i + size - 1) % size]);
+        state[(i + mid) % size] += r;
+        r += array[j] + i;
+        state[(i + mid + lag) % size] += r;
+        state[i] = r;
+        i = (i + 1) % size;
+    }
+    for (; j < count; j++) {
+        r = ini_func1(state[i] ^ state[(i + mid) % size]
+                      ^ state[(i + size - 1) % size]);
+        state[(i + mid) % size] += r;
+        r += i;
+        state[(i + mid + lag) % size] += r;
+        state[i] = r;
+        i = (i + 1) % size;
+    }
+    for (j = 0; j < size; j++) {
+        r = ini_func2(state[i] + state[(i + mid) % size]
+                      + state[(i + size - 1) % size]);
+        state[(i + mid) % size] ^= r;
+        r -= i;
+        state[(i + mid + lag) % size] ^= r;
+        state[i] = r;
+        i = (i + 1) % size;
+    }
+    if (state[size - 1] == 0) {
+        state[size - 1] = non_zero;
+    }
+    return 0;
+}
+
+/*
+ * This function initializes the internal state array
+ * with a character array. \b para should be one of the elements in
+ * the parameter table (mtgp32-param-ref.c).
+ * This is the same algorithm as mtgp32_init_by_array(), but may be
+ * more convenient.
+ *
+ * @param[out] state MTGP internal state array.
+ * @param[in] para parameter structure
+ * @param[in] array a character array used as a seed. (terminated by zero.)
+ * @return memory allocation result; 0 means success.
+ */
+static __forceinline__ __host__
+int mtgp32_init_by_str(unsigned int state[],
+                       const mtgp32_params_fast_t *para, unsigned char *array) {
+    int i, j, count;
+    unsigned int r;
+    int lag;
+    int mid;
+    int size = para->mexp / 32 + 1;
+    int length = (unsigned int)strlen((char *)array);
+    unsigned int hidden_seed;
+    unsigned int tmp;
+
+    if (size >= 623) {
+        lag = 11;
+    } else if (size >= 68) {
+        lag = 7;
+    } else if (size >= 39) {
+        lag = 5;
+    } else {
+        lag = 3;
+    }
+    mid = (size - lag) / 2;
+
+    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
+    tmp = hidden_seed;
+    tmp += tmp >> 16;
+    tmp += tmp >> 8;
+    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
+    state[0] = hidden_seed;
+
+    if (length + 1 > size) {
+        count = length + 1;
+    } else {
+        count = size;
+    }
+    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
+    state[mid] += r;
+    r += length;
+    state[(mid + lag) % size] += r;
+    state[0] = r;
+    i = 1;
+    count--;
+    for (i = 1, j = 0; (j < count) && (j < length); j++) {
+        r = ini_func1(state[i] ^ state[(i + mid) % size]
+                      ^ state[(i + size - 1) % size]);
+        state[(i + mid) % size] += r;
+        r += array[j] + i;
+        state[(i + mid + lag) % size] += r;
+        state[i] = r;
+        i = (i + 1) % size;
+    }
+    for (; j < count; j++) {
+        r = ini_func1(state[i] ^ state[(i + mid) % size]
+                      ^ state[(i + size - 1) % size]);
+        state[(i + mid) % size] += r;
+        r += i;
+        state[(i + mid + lag) % size] += r;
+        state[i] = r;
+        i = (i + 1) % size;
+    }
+    for (j = 0; j < size; j++) {
+        r = ini_func2(state[i] + state[(i + mid) % size]
+                      + state[(i + size - 1) % size]);
+        state[(i + mid) % size] ^= r;
+        r -= i;
+        state[(i + mid + lag) % size] ^= r;
+        state[i] = r;
+        i = (i + 1) % size;
+    }
+    if (state[size - 1] == 0) {
+        state[size - 1] = non_zero;
+    }
+    return 0;
+}
+
+template <typename ParamsType>
+static __forceinline__ __host__
+curandStatus_t curandMakeMTGP32ConstantsImpl(const mtgp32_params_fast_t params[], ParamsType * p, const int block_num)
+{
+    const int size1 = sizeof(unsigned int) * block_num;
+    const int size2 = sizeof(unsigned int) * block_num * TBL_SIZE;
+    unsigned int *h_pos_tbl;
+    unsigned int *h_sh1_tbl;
+    unsigned int *h_sh2_tbl;
+    unsigned int *h_param_tbl;
+    unsigned int *h_temper_tbl;
+    unsigned int *h_single_temper_tbl;
+    unsigned int *h_mask;
+    curandStatus_t status = CURAND_STATUS_SUCCESS;
+
+    h_pos_tbl = (unsigned int *)malloc(size1);
+    h_sh1_tbl = (unsigned int *)malloc(size1);
+    h_sh2_tbl = (unsigned int *)malloc(size1);
+    h_param_tbl = (unsigned int *)malloc(size2);
+    h_temper_tbl = (unsigned int *)malloc(size2);
+    h_single_temper_tbl = (unsigned int *)malloc(size2);
+    h_mask = (unsigned int *)malloc(sizeof(unsigned int));
+    if (h_pos_tbl == NULL
+        || h_sh1_tbl == NULL
+        || h_sh2_tbl == NULL
+        || h_param_tbl == NULL
+        || h_temper_tbl == NULL
+        || h_single_temper_tbl == NULL
+        || h_mask == NULL) {
+        if (h_pos_tbl != NULL) free(h_pos_tbl);
+        if (h_sh1_tbl != NULL) free(h_sh1_tbl);
+        if (h_sh2_tbl != NULL) free(h_sh2_tbl);
+        if (h_param_tbl != NULL) free(h_param_tbl);
+        if (h_temper_tbl != NULL) free(h_temper_tbl);
+        if (h_single_temper_tbl != NULL) free(h_single_temper_tbl);
+        if (h_mask != NULL) free(h_mask);
+        status = CURAND_STATUS_ALLOCATION_FAILED;
+    } else {
+
+        h_mask[0] = params[0].mask;
+        for (int i = 0; i < block_num; i++) {
+            h_pos_tbl[i] = params[i].pos;
+            h_sh1_tbl[i] = params[i].sh1;
+            h_sh2_tbl[i] = params[i].sh2;
+            for (int j = 0; j < TBL_SIZE; j++) {
+                h_param_tbl[i * TBL_SIZE + j] = params[i].tbl[j];
+                h_temper_tbl[i * TBL_SIZE + j] = params[i].tmp_tbl[j];
+                h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
+            }
+        }
+        if (cudaMemcpy( p->pos_tbl,
+            h_pos_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        } else
+        if (cudaMemcpy( p->sh1_tbl,
+            h_sh1_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        } else
+        if (cudaMemcpy( p->sh2_tbl,
+            h_sh2_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        } else
+        if (cudaMemcpy( p->param_tbl,
+            h_param_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        } else
+        if (cudaMemcpy( p->temper_tbl,
+            h_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        } else
+        if (cudaMemcpy( p->single_temper_tbl,
+            h_single_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        } else
+        if (cudaMemcpy( p->mask,
+            h_mask, sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess)
+        {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        }
+    }
+    if (h_pos_tbl != NULL) free(h_pos_tbl);
+    if (h_sh1_tbl != NULL) free(h_sh1_tbl);
+    if (h_sh2_tbl != NULL) free(h_sh2_tbl);
+    if (h_param_tbl != NULL) free(h_param_tbl);
+    if (h_temper_tbl != NULL) free(h_temper_tbl);
+    if (h_single_temper_tbl != NULL) free(h_single_temper_tbl);
+    if (h_mask != NULL) free(h_mask);
+    return status;
+}
+
+/**
+ * \brief Set up constant parameters for the mtgp32 generator
+ *
+ * This host-side helper function re-organizes CURAND_NUM_MTGP32_PARAMS sets of
+ * generator parameters for use by kernel functions and copies the
+ * result to the specified location in device memory.
+ *
+ * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
+ * \param p - Pointer to a structure of type mtgp32_kernel_params_t in device memory
+ *
+ * \return
+ * - CURAND_STATUS_ALLOCATION_FAILED if host memory could not be allocated
+ * - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
+ * - CURAND_STATUS_SUCCESS otherwise
+ */
+static __forceinline__ __host__
+curandStatus_t curandMakeMTGP32Constants(const mtgp32_params_fast_t params[], mtgp32_kernel_params_t * p)
+{
+    return curandMakeMTGP32ConstantsImpl(params, p, CURAND_NUM_MTGP32_PARAMS);
+}
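/* Usage sketch (illustration only, not part of the vendored header): typical
 * host-side MTGP32 setup using the helper above and curandMakeMTGP32KernelState()
 * documented below, following the pattern from the cuRAND documentation.
 * Variable names are hypothetical and error checking is elided;
 * mtgp32dc_params_fast_11213 is the pre-generated parameter table shipped in
 * curand_mtgp32dc_p_11213.h. */
curandStateMtgp32_t *devStates;
mtgp32_kernel_params_t *devKernelParams;
cudaMalloc((void **)&devStates, 64 * sizeof(curandStateMtgp32_t));
cudaMalloc((void **)&devKernelParams, sizeof(mtgp32_kernel_params_t));
/* Reorganize the parameter sets and copy them into device memory. */
curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams);
/* Initialize 64 states (at most CURAND_NUM_MTGP32_PARAMS) from one seed;
 * each state is intended to be driven by one thread block. */
curandMakeMTGP32KernelState(devStates, mtgp32dc_params_fast_11213,
                            devKernelParams, 64, 1234ULL);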
+
+/**
+ * \brief Set up initial states for the mtgp32 generator
+ *
+ * This host-side helper function initializes a number of states (one parameter set per state) for
+ * an mtgp32 generator. To accomplish this it allocates a state array in host memory,
+ * initializes that array, and copies the result to device memory.
+ *
+ * \param s - Pointer to an array of states in device memory
+ * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
+ * \param k - Pointer to a structure of type mtgp32_kernel_params_t in device memory
+ * \param n - Number of parameter sets/states to initialize
+ * \param seed - Seed value
+ *
+ * \return
+ * - CURAND_STATUS_ALLOCATION_FAILED if host memory state could not be allocated
+ * - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
+ * - CURAND_STATUS_SUCCESS otherwise
+ */
+static __forceinline__ __host__
+curandStatus_t CURANDAPI curandMakeMTGP32KernelState(curandStateMtgp32_t *s,
+                                                     mtgp32_params_fast_t params[],
+                                                     mtgp32_kernel_params_t *k,
+                                                     int n,
+                                                     unsigned long long seed)
+{
+    int i;
+    curandStatus_t status = CURAND_STATUS_SUCCESS;
+    curandStateMtgp32_t *h_status = (curandStateMtgp32_t *) malloc(sizeof(curandStateMtgp32_t) * n);
+    if (h_status == NULL) {
+        status = CURAND_STATUS_ALLOCATION_FAILED;
+    } else {
+        seed = seed ^ (seed >> 32);
+        for (i = 0; i < n; i++) {
+            mtgp32_init_state(&(h_status[i].s[0]), &params[i], (unsigned int)seed + i + 1);
+            h_status[i].offset = 0;
+            h_status[i].pIdx = i;
+            h_status[i].k = k;
+        }
+        if (cudaMemcpy(s, h_status,
+                       sizeof(curandStateMtgp32_t) * n,
+                       cudaMemcpyHostToDevice) != cudaSuccess) {
+            status = CURAND_STATUS_INITIALIZATION_FAILED;
+        }
+    }
+    free(h_status);
+    return status;
+}
+
+/** @} */
+
+#endif
+
+
diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..087cdd0796d1e6d6731acd74a07718dfd1312275
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2010-2014 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/*
+ * curand_mtgp32_kernel.h
+ *
+ *
+ * MTGP32-11213
+ *
+ * Mersenne Twister RNG for the GPU
+ *
+ * The period of generated integers is 2^11213-1.
+ *
+ * This code generates 32-bit unsigned integers, and
+ * single precision floating point numbers uniformly distributed
+ * in the range [1, 2). (float r; 1.0 <= r < 2.0)
+ */
+
+/*
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University.  All rights reserved.
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
+ * University and University of Tokyo.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of the Hiroshima University nor the names of
+ *       its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#if !defined CURAND_MTGP32_KERNEL_H
+#define CURAND_MTGP32_KERNEL_H
+
+#if !defined(QUALIFIERS)
+#define QUALIFIERS static __forceinline__ __device__
+#endif
+
+#ifndef __CUDACC_RTC__
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+#endif // ifndef __CUDACC_RTC__
+#include <nv/target>
+#include "curand.h"
+#include "curand_mtgp32.h"
+
+/**
+ * \addtogroup DEVICE Device API
+ *
+ * @{
+ */
+
+#ifndef __CUDA_ARCH__
+// define blockDim and threadIdx for host compatibility call
+extern const dim3 blockDim;
+extern const uint3 threadIdx;
+#endif
+
+
+/*
+ * The function of the recursion formula calculation.
+ *
+ * @param[in] X1 the farthest part of state array.
+ * @param[in] X2 the second farthest part of state array.
+ * @param[in] Y a part of state array.
+ * @param[in] bid block id.
+ * @return output
+ */
+QUALIFIERS unsigned int para_rec(mtgp32_kernel_params_t * k, unsigned int X1, unsigned int X2, unsigned int Y, int bid) {
+    unsigned int X = (X1 & k->mask[0]) ^ X2;
+    unsigned int MAT;
+
+    X ^= X << k->sh1_tbl[bid];
+    Y = X ^ (Y >> k->sh2_tbl[bid]);
+    MAT = k->param_tbl[bid][Y & 0x0f];
+    return Y ^ MAT;
+}
+
+/*
+ * The tempering function.
+ *
+ * @param[in] V the output value should be tempered.
+ * @param[in] T the tempering helper value.
+ * @param[in] bid block id.
+ * @return the tempered value.
+ */
+QUALIFIERS unsigned int temper(mtgp32_kernel_params_t * k, unsigned int V, unsigned int T, int bid) {
+    unsigned int MAT;
+
+    T ^= T >> 16;
+    T ^= T >> 8;
+    MAT = k->temper_tbl[bid][T & 0x0f];
+    return V ^ MAT;
+}
+
+/*
+ * The tempering and converting function.
+ * By using the preset table, converting to IEEE format
+ * and tempering are done simultaneously.
+ *
+ * @param[in] V the output value should be tempered.
+ * @param[in] T the tempering helper value.
+ * @param[in] bid block id.
+ * @return the tempered and converted value.
+ */
+QUALIFIERS unsigned int temper_single(mtgp32_kernel_params_t * k, unsigned int V, unsigned int T, int bid) {
+    unsigned int MAT;
+    unsigned int r;
+
+    T ^= T >> 16;
+    T ^= T >> 8;
+    MAT = k->single_temper_tbl[bid][T & 0x0f];
+    r = (V >> 9) ^ MAT;
+    return r;
+}
+
+/**
+ * \brief Return 32-bits of pseudorandomness from a mtgp32 generator.
+ *
+ * Return 32-bits of pseudorandomness from the mtgp32 generator in \p state,
+ * increment position of generator by the number of threads in the block.
+ * Note the number of threads in the block cannot exceed 256.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
+ */
+QUALIFIERS unsigned int curand(curandStateMtgp32_t *state)
+{
+    unsigned int t;
+    unsigned int d;
+    int pos = state->k->pos_tbl[state->pIdx];
+    unsigned int r;
+    unsigned int o;
+
+    d = blockDim.z * blockDim.y * blockDim.x;
+    //assert( d <= 256 );
+    t = (blockDim.z * blockDim.y * threadIdx.z) + (blockDim.x * threadIdx.y) + threadIdx.x;
+    r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
+                 state->pIdx);
+
+    state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;
+    o = temper(state->k, r,
+               state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
+               state->pIdx);
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    if (t == 0)
+    {
+        state->offset = (state->offset + d) & MTGP32_STATE_MASK;
+    }
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    return o;
+
+}
+/**
+ * \brief Return 32-bits of pseudorandomness from a specific position in a mtgp32 generator.
+ *
+ * Return 32-bits of pseudorandomness from position \p index of the mtgp32 generator in \p state,
+ * increment position of generator by \p n positions, which must be the total number of positions
+ * updated in the state by the thread block, for this invocation.
+ *
+ * Note:
+ * Thread indices must range from 0...\p n - 1.
+ * The number of positions updated may not exceed 256.
+ * A thread block may update more than one state, but a given state may not be updated by more than one thread block.
+ *
+ * \param state - Pointer to state to update
+ * \param index - Index (0..255) of the position within the state to draw from and update
+ * \param n - The total number of positions in this state that are being updated by this invocation
+ *
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
+ */
+QUALIFIERS unsigned int curand_mtgp32_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
+{
+    unsigned int t;
+    int pos = state->k->pos_tbl[state->pIdx];
+    unsigned int r;
+    unsigned int o;
+
+    t = index;
+    r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
+                 state->pIdx);
+
+    state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;
+    o = temper(state->k, r,
+               state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
+               state->pIdx);
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    if (index == 0)
+    {
+        state->offset = (state->offset + n) & MTGP32_STATE_MASK;
+    }
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    return o;
+}
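/* Usage sketch (illustration only, not part of the vendored header): MTGP32
 * is a block-level generator, so all threads of a block (at most 256) call
 * curand() on the same state, and each block drives its own state. The
 * kernel and buffer names are hypothetical. */
__global__ void mtgp32_kernel(curandStateMtgp32_t *states, unsigned int *out, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    /* Called unconditionally: every thread of the block must participate,
     * because the generator synchronizes the block internally. */
    unsigned int x = curand(&states[blockIdx.x]);
    if (id < n)
        out[id] = x;
}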
+/**
+ * \brief Return a uniformly distributed float from a mtgp32 generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the mtgp32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * Note: This alternate derivation of a uniform float is provided for completeness
+ * with the original source.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_mtgp32_single(curandStateMtgp32_t *state)
+{
+    unsigned int t;
+    unsigned int d;
+    int pos = state->k->pos_tbl[state->pIdx];
+    unsigned int r;
+    unsigned int o_u;
+    float o_f;
+
+
+    t = blockDim.z * blockDim.y;
+    d = t * blockDim.x;
+    //assert( d <= 256 );
+    t += threadIdx.x;
+    r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
+                 state->pIdx);
+
+    state->s[t] = r;
+    o_u = temper_single(state->k, r,
+                        state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
+                        state->pIdx);
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    if (threadIdx.x == 0)
+    {
+        state->offset = (state->offset + d) & MTGP32_STATE_MASK;
+    }
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    memcpy(&o_f, &o_u, sizeof(o_u));
+    return o_f;
+}
+
+/**
+ * \brief Return a uniformly distributed float from a specific position in a mtgp32 generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from position \p index of the mtgp32 generator in \p state, and
+ * increment position of generator by \p n positions, which must be the total number of positions
+ * updated in the state by the thread block, for this invocation.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * Note 1:
+ * Thread indices must range from 0...\p n - 1.
+ * The number of positions updated may not exceed 256.
+ * A thread block may update more than one state, but a given state may not be updated by more than one thread block.
+ *
+ * Note 2: This alternate derivation of a uniform float is provided for completeness
+ * with the original source.
+ *
+ * \param state - Pointer to state to update
+ * \param index - Index (0..255) of the position within the state to draw from and update
+ * \param n - The total number of positions in this state that are being updated by this invocation
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_mtgp32_single_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
+{
+    unsigned int t;
+    int pos = state->k->pos_tbl[state->pIdx];
+    unsigned int r;
+    unsigned int o_u;
+    float o_f;
+
+    t = index;
+    r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
+                 state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
+                 state->pIdx);
+
+    state->s[t] = r;
+    o_u = temper_single(state->k, r,
+                        state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
+                        state->pIdx);
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    if (threadIdx.x == 0)
+    {
+        state->offset = (state->offset + n) & MTGP32_STATE_MASK;
+    }
+NV_IF_TARGET(NV_IS_DEVICE,
+    __syncthreads();
+)
+    memcpy(&o_f, &o_u, sizeof(o_u));
+    return o_f;
+}
+
+/** @} */
+
+#endif
diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3c91001032acf253959df8b8a5464d038ee252b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h
@@ -0,0 +1,840 @@
+
+ /* Copyright 2010-2014 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+
+#if !defined(CURAND_NORMAL_H_)
+#define CURAND_NORMAL_H_
+
+/**
+ * \defgroup DEVICE Device API
+ *
+ * @{
+ */
+
+#ifndef __CUDACC_RTC__
+#include <math.h>
+#endif // __CUDACC_RTC__
+#include <nv/target>
+
+#include "curand_mrg32k3a.h"
+#include "curand_mtgp32_kernel.h"
+#include "curand_philox4x32_x.h"
+#include "curand_normal_static.h"
+
+QUALIFIERS float2 _curand_box_muller(unsigned int x, unsigned int y)
+{
+    float2 result;
+    float u = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2);
+    float v = y * CURAND_2POW32_INV_2PI + (CURAND_2POW32_INV_2PI/2);
+    float s;
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    s = sqrtf(-2.0f * logf(u));
+    __sincosf(v, &result.x, &result.y);
+,
+    s = sqrtf(-2.0f * logf(u));
+    result.x = sinf(v);
+    result.y = cosf(v);
+)
+    result.x *= s;
+    result.y *= s;
+    return result;
+}
+
+QUALIFIERS float2 curand_box_muller_mrg(curandStateMRG32k3a_t * state)
+{
+    float x, y;
+    x = curand_uniform(state);
+    y = curand_uniform(state) * CURAND_2PI;
+    float2 result;
+    float s;
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    s = sqrtf(-2.0f * logf(x));
+    __sincosf(y, &result.x, &result.y);
+,
+    s = sqrtf(-2.0f * logf(x));
+    result.x = sinf(y);
+    result.y = cosf(y);
+)
+    result.x *= s;
+    result.y *= s;
+    return result;
+}
+
+QUALIFIERS double2
+_curand_box_muller_double(unsigned int x0, unsigned int x1,
+                          unsigned int y0, unsigned int y1)
+{
+    double2 result;
+    unsigned long long zx = (unsigned long long)x0 ^
+        ((unsigned long long)x1 << (53 - 32));
+    double u = zx * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
+    unsigned long long zy = (unsigned long long)y0 ^
+        ((unsigned long long)y1 << (53 - 32));
+    double v = zy * (CURAND_2POW53_INV_DOUBLE*2.0) + CURAND_2POW53_INV_DOUBLE;
+    double s = sqrt(-2.0 * log(u));
+
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    sincospi(v, &result.x, &result.y);
+,
+    result.x = sin(v*CURAND_PI_DOUBLE);
+    result.y = cos(v*CURAND_PI_DOUBLE);
+)
+    result.x *= s;
+    result.y *= s;
+
+    return result;
+}
+
+QUALIFIERS double2
+curand_box_muller_mrg_double(curandStateMRG32k3a_t * state)
+{
+    double x, y;
+    double2 result;
+    x = curand_uniform_double(state);
+    y = curand_uniform_double(state) * 2.0;
+
+    double s = sqrt(-2.0 * log(x));
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    sincospi(y, &result.x, &result.y);
+,
+    result.x = sin(y*CURAND_PI_DOUBLE);
+    result.y = cos(y*CURAND_PI_DOUBLE);
+)
+    result.x *= s;
+    result.y *= s;
+    return result;
+}
+
+template <typename R>
+QUALIFIERS float2 curand_box_muller(R *state)
+{
+    float2 result;
+    unsigned int x = curand(state);
+    unsigned int y = curand(state);
+    result = _curand_box_muller(x, y);
+    return result;
+}
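/* Reference sketch (illustration only, not part of the vendored header):
 * the helpers above implement the standard Box-Muller transform. For
 * uniforms u, v in (0, 1], with s = sqrt(-2 ln u), the pair
 * (s*sin(2*pi*v), s*cos(2*pi*v)) gives two independent N(0, 1) draws.
 * A plain host version, useful for sanity-checking the device path
 * numerically (the function name is hypothetical): */
#include <math.h>
static void box_muller_reference(double u, double v, double *n0, double *n1)
{
    double s = sqrt(-2.0 * log(u));     /* radius term */
    *n0 = s * sin(2.0 * M_PI * v);      /* first normal draw */
    *n1 = s * cos(2.0 * M_PI * v);      /* second normal draw */
}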
+
+template <typename R>
+QUALIFIERS float4 curand_box_muller4(R *state)
+{
+    float4 result;
+    float2 _result;
+    uint4 x = curand4(state);
+    //unsigned int y = curand(state);
+    _result = _curand_box_muller(x.x, x.y);
+    result.x = _result.x;
+    result.y = _result.y;
+    _result = _curand_box_muller(x.z, x.w);
+    result.z = _result.x;
+    result.w = _result.y;
+    return result;
+}
+
+template <typename R>
+QUALIFIERS double2 curand_box_muller_double(R *state)
+{
+    double2 result;
+    unsigned int x0 = curand(state);
+    unsigned int x1 = curand(state);
+    unsigned int y0 = curand(state);
+    unsigned int y1 = curand(state);
+    result = _curand_box_muller_double(x0, x1, y0, y1);
+    return result;
+}
+
+template <typename R>
+QUALIFIERS double2 curand_box_muller2_double(R *state)
+{
+    double2 result;
+    uint4 _x;
+    _x = curand4(state);
+    result = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
+    return result;
+}
+
+
+template <typename R>
+QUALIFIERS double4 curand_box_muller4_double(R *state)
+{
+    double4 result;
+    double2 _res1;
+    double2 _res2;
+    uint4 _x;
+    uint4 _y;
+    _x = curand4(state);
+    _y = curand4(state);
+    _res1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
+    _res2 = _curand_box_muller_double(_y.x, _y.y, _y.z, _y.w);
+    result.x = _res1.x;
+    result.y = _res1.y;
+    result.z = _res2.x;
+    result.w = _res2.y;
+    return result;
+}
+
+//QUALIFIERS float _curand_normal_icdf(unsigned int x)
+//{
+//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
+//    float s = CURAND_SQRT2;
+//    // Mirror to avoid loss of precision
+//    if(x > 0x80000000UL) {
+//        x = 0xffffffffUL - x;
+//        s = -s;
+//    }
+//    float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+//    // p is in (0, 0.5], 2p is in (0, 1]
+//    return s * erfcinvf(2.0f * p);
+//#else
+//    x++; //suppress warnings
+//    return 0.0f;
+//#endif
+//}
+//
+//QUALIFIERS float _curand_normal_icdf(unsigned long long x)
+//{
+//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
+//    unsigned int t = (unsigned int)(x >> 32);
+//    float s = CURAND_SQRT2;
+//    // Mirror to avoid loss of precision
+//    if(t > 0x80000000UL) {
+//        t = 0xffffffffUL - t;
+//        s = -s;
+//    }
+//    float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+//    // p is in (0, 0.5], 2p is in (0, 1]
+//    return s * erfcinvf(2.0f * p);
+//#else
+//    x++;
+//    return 0.0f;
+//#endif
+//}
+//
+//QUALIFIERS double _curand_normal_icdf_double(unsigned int x)
+//{
+//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
+//    double s = CURAND_SQRT2_DOUBLE;
+//    // Mirror to avoid loss of precision
+//    if(x > 0x80000000UL) {
+//        x = 0xffffffffUL - x;
+//        s = -s;
+//    }
+//    double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
+//    // p is in (0, 0.5], 2p is in (0, 1]
+//    return s * erfcinv(2.0 * p);
+//#else
+//    x++;
+//    return 0.0;
+//#endif
+//}
+//
+//QUALIFIERS double _curand_normal_icdf_double(unsigned long long x)
+//{
+//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
+//    double s = CURAND_SQRT2_DOUBLE;
+//    x >>= 11;
+//    // Mirror to avoid loss of precision
+//    if(x > 0x10000000000000UL) {
+//        x = 0x1fffffffffffffUL - x;
+//        s = -s;
+//    }
+//    double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
+//    // p is in (0, 0.5], 2p is in (0, 1]
+//    return s * erfcinv(2.0 * p);
+//#else
+//    x++;
+//    return 0.0;
+//#endif
+//}
+//
+
+/**
+ * \brief Return a normally distributed float from an XORWOW generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the XORWOW generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, then returns them one at a time.
+ * See ::curand_normal2() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateXORWOW_t *state)
+{
+    if(state->boxmuller_flag != EXTRA_FLAG_NORMAL) {
+        unsigned int x, y;
+        x = curand(state);
+        y = curand(state);
+        float2 v = _curand_box_muller(x, y);
+        state->boxmuller_extra = v.y;
+        state->boxmuller_flag = EXTRA_FLAG_NORMAL;
+        return v.x;
+    }
+    state->boxmuller_flag = 0;
+    return state->boxmuller_extra;
+}
+
+/**
+ * \brief Return a normally distributed float from a Philox4_32_10 generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, then returns them one at a time.
+ * See ::curand_normal2() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+
+QUALIFIERS float curand_normal(curandStatePhilox4_32_10_t *state)
+{
+    if(state->boxmuller_flag != EXTRA_FLAG_NORMAL) {
+        unsigned int x, y;
+        x = curand(state);
+        y = curand(state);
+        float2 v = _curand_box_muller(x, y);
+        state->boxmuller_extra = v.y;
+        state->boxmuller_flag = EXTRA_FLAG_NORMAL;
+        return v.x;
+    }
+    state->boxmuller_flag = 0;
+    return state->boxmuller_extra;
+}
+
+
+
+/**
+ * \brief Return a normally distributed float from an MRG32k3a generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the MRG32k3a generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, then returns them one at a time.
+ * See ::curand_normal2() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateMRG32k3a_t *state)
+{
+    if(state->boxmuller_flag != EXTRA_FLAG_NORMAL) {
+        float2 v = curand_box_muller_mrg(state);
+        state->boxmuller_extra = v.y;
+        state->boxmuller_flag = EXTRA_FLAG_NORMAL;
+        return v.x;
+    }
+    state->boxmuller_flag = 0;
+    return state->boxmuller_extra;
+}
+
+/**
+ * \brief Return two normally distributed floats from an XORWOW generator.
+ *
+ * Return two normally distributed floats with mean \p 0.0f and
+ * standard deviation \p 1.0f from the XORWOW generator in \p state,
+ * increment position of generator by two.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float2 where each element is from a
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float2 curand_normal2(curandStateXORWOW_t *state)
+{
+    return curand_box_muller(state);
+}
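/* Usage sketch (illustration only, not part of the vendored header):
 * drawing normals in pairs via curand_normal2() yields both Box-Muller
 * results directly instead of caching one of them behind the flag used
 * by curand_normal(). The kernel and buffer names are hypothetical. */
__global__ void normal2_kernel(curandStateXORWOW_t *states, float2 *out, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= n) return;
    curandStateXORWOW_t local = states[id];   // register copy of the state
    out[id] = curand_normal2(&local);         // two N(0,1) floats per call
    states[id] = local;                       // persist the advanced state
}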
+/**
+ * \brief Return two normally distributed floats from a Philox4_32_10 generator.
+ *
+ * Return two normally distributed floats with mean \p 0.0f and
+ * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
+ * increment position of generator by two.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float2 where each element is from a
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float2 curand_normal2(curandStatePhilox4_32_10_t *state)
+{
+    return curand_box_muller(state);
+}
+
+/**
+ * \brief Return four normally distributed floats from a Philox4_32_10 generator.
+ *
+ * Return four normally distributed floats with mean \p 0.0f and
+ * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
+ * increment position of generator by four.
+ *
+ * The implementation uses Box-Muller transforms to generate four
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float4 where each element is from a
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float4 curand_normal4(curandStatePhilox4_32_10_t *state)
+{
+    return curand_box_muller4(state);
+}
+
+
+
+/**
+ * \brief Return two normally distributed floats from an MRG32k3a generator.
+ *
+ * Return two normally distributed floats with mean \p 0.0f and
+ * standard deviation \p 1.0f from the MRG32k3a generator in \p state,
+ * increment position of generator by two.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float2 where each element is from a
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float2 curand_normal2(curandStateMRG32k3a_t *state)
+{
+    return curand_box_muller_mrg(state);
+}
+
+/**
+ * \brief Return a normally distributed float from a MTGP32 generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the MTGP32 generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateMtgp32_t *state)
+{
+    return _curand_normal_icdf(curand(state));
+}
+/**
+ * \brief Return a normally distributed float from a Sobol32 generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the Sobol32 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateSobol32_t *state)
+{
+    return _curand_normal_icdf(curand(state));
+}
+
+/**
+ * \brief Return a normally distributed float from a scrambled Sobol32 generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the scrambled Sobol32 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateScrambledSobol32_t *state)
+{
+    return _curand_normal_icdf(curand(state));
+}
+
+/**
+ * \brief Return a normally distributed float from a Sobol64 generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the Sobol64 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateSobol64_t *state)
+{
+    return _curand_normal_icdf(curand(state));
+}
+
+/**
+ * \brief Return a normally distributed float from a scrambled Sobol64 generator.
+ *
+ * Return a single normally distributed float with mean \p 0.0f and
+ * standard deviation \p 1.0f from the scrambled Sobol64 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
+ */
+QUALIFIERS float curand_normal(curandStateScrambledSobol64_t *state)
+{
+    return _curand_normal_icdf(curand(state));
+}
+
+/**
+ * \brief Return a normally distributed double from an XORWOW generator.
+ *
+ * Return a single normally distributed double with mean \p 0.0 and
+ * standard deviation \p 1.0 from the XORWOW generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, then returns them one at a time.
+ * See ::curand_normal2_double() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double curand_normal_double(curandStateXORWOW_t *state)
+{
+    if(state->boxmuller_flag_double != EXTRA_FLAG_NORMAL) {
+        unsigned int x0, x1, y0, y1;
+        x0 = curand(state);
+        x1 = curand(state);
+        y0 = curand(state);
+        y1 = curand(state);
+        double2 v = _curand_box_muller_double(x0, x1, y0, y1);
+        state->boxmuller_extra_double = v.y;
+        state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
+        return v.x;
+    }
+    state->boxmuller_flag_double = 0;
+    return state->boxmuller_extra_double;
+}
+
+/**
+ * \brief Return a normally distributed double from a Philox4_32_10 generator.
+ *
+ * Return a single normally distributed double with mean \p 0.0 and
+ * standard deviation \p 1.0 from the Philox4_32_10 generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, then returns them one at a time.
+ * See ::curand_normal2_double() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
+ */
+
+QUALIFIERS double curand_normal_double(curandStatePhilox4_32_10_t *state)
+{
+    if(state->boxmuller_flag_double != EXTRA_FLAG_NORMAL) {
+        uint4 _x;
+        _x = curand4(state);
+        double2 v = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
+        state->boxmuller_extra_double = v.y;
+        state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
+        return v.x;
+    }
+    state->boxmuller_flag_double = 0;
+    return state->boxmuller_extra_double;
+}
+
+
+/**
+ * \brief Return a normally distributed double from an MRG32k3a generator.
+ *
+ * Return a single normally distributed double with mean \p 0.0 and
+ * standard deviation \p 1.0 from the MRG32k3a generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results, then returns them one at a time.
+ * See ::curand_normal2_double() for a more efficient version that returns
+ * both results at once.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double curand_normal_double(curandStateMRG32k3a_t *state)
+{
+    if(state->boxmuller_flag_double != EXTRA_FLAG_NORMAL) {
+        double2 v = curand_box_muller_mrg_double(state);
+        state->boxmuller_extra_double = v.y;
+        state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
+        return v.x;
+    }
+    state->boxmuller_flag_double = 0;
+    return state->boxmuller_extra_double;
+}
+
+/**
+ * \brief Return two normally distributed doubles from an XORWOW generator.
+ *
+ * Return two normally distributed doubles with mean \p 0.0 and
+ * standard deviation \p 1.0 from the XORWOW generator in \p state,
+ * increment position of generator by 2.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double2 where each element is from a
+ * distribution with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double2 curand_normal2_double(curandStateXORWOW_t *state)
+{
+    return curand_box_muller_double(state);
+}
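+/*
+ * [Illustrative sketch added by the editor; not part of the original
+ * cuRAND header.]  The Box-Muller transform referenced throughout this
+ * section maps two independent uniforms to two independent standard
+ * normals.  A minimal reference version, assuming u1 in (0,1] and
+ * u2 in [0,1):
+ */
+QUALIFIERS float2 _box_muller_reference_sketch(float u1, float u2)
+{
+    float2 n;
+    float r     = sqrtf(-2.0f * logf(u1));            /* Rayleigh radius; u1 > 0 keeps log finite */
+    float theta = 2.0f * 3.14159265358979323846f * u2; /* uniform angle in [0, 2*pi) */
+    n.x = r * cosf(theta);                             /* first standard normal  */
+    n.y = r * sinf(theta);                             /* second standard normal */
+    return n;
+}
+/*
+ * The library versions above differ only in how they obtain the uniforms
+ * (raw 32-bit generator output) and in using fast device intrinsics.
+ */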
+/**
+ * \brief Return two normally distributed doubles from a Philox4_32_10 generator.
+ *
+ * Return two normally distributed doubles with mean \p 0.0 and
+ * standard deviation \p 1.0 from the Philox4_32_10 generator in \p state,
+ * increment position of generator by 2.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double2 where each element is from a
+ * distribution with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double2 curand_normal2_double(curandStatePhilox4_32_10_t *state)
+{
+    uint4 _x;
+    double2 result;
+
+    _x = curand4(state);
+    double2 v1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
+    result.x = v1.x;
+    result.y = v1.y;
+
+    return result;
+}
+
+ // not a part of API
+QUALIFIERS double4 curand_normal4_double(curandStatePhilox4_32_10_t *state)
+{
+    uint4 _x;
+    uint4 _y;
+    double4 result;
+
+    _x = curand4(state);
+    _y = curand4(state);
+    double2 v1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
+    double2 v2 = _curand_box_muller_double(_y.x, _y.y, _y.z, _y.w);
+    result.x = v1.x;
+    result.y = v1.y;
+    result.z = v2.x;
+    result.w = v2.y;
+
+    return result;
+}
+
+
+/**
+ * \brief Return two normally distributed doubles from an MRG32k3a generator.
+ *
+ * Return two normally distributed doubles with mean \p 0.0 and
+ * standard deviation \p 1.0 from the MRG32k3a generator in \p state,
+ * increment position of generator by two.
+ *
+ * The implementation uses a Box-Muller transform to generate two
+ * normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double2 where each element is from a
+ * distribution with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double2 curand_normal2_double(curandStateMRG32k3a_t *state)
+{
+    return curand_box_muller_mrg_double(state);
+}
+
+/**
+ * \brief Return a normally distributed double from an MTGP32 generator.
+ *
+ * Return a single normally distributed double with mean \p 0.0 and
+ * standard deviation \p 1.0 from the MTGP32 generator in \p state,
+ * increment position of generator.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double curand_normal_double(curandStateMtgp32_t *state)
+{
+    return _curand_normal_icdf_double(curand(state));
+}
+
+/**
+ * \brief Return a normally distributed double from a Sobol32 generator.
+ *
+ * Return a single normally distributed double with mean \p 0.0 and
+ * standard deviation \p 1.0 from the Sobol32 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
+ */
+QUALIFIERS double curand_normal_double(curandStateSobol32_t *state)
+{
+    return _curand_normal_icdf_double(curand(state));
+}
+
+/**
+ * \brief Return a normally distributed double from a scrambled Sobol32 generator.
+ *
+ * Return a single normally distributed double with mean \p 0.0 and
+ * standard deviation \p 1.0 from the scrambled Sobol32 generator in \p state,
+ * increment position of generator by one.
+ *
+ * The implementation uses the inverse cumulative distribution function
+ * to generate normally distributed results.
+ * + * \param state - Pointer to state to update + * + * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0 + */ +QUALIFIERS double curand_normal_double(curandStateScrambledSobol32_t *state) +{ + return _curand_normal_icdf_double(curand(state)); +} + +/** + * \brief Return a normally distributed double from a Sobol64 generator. + * + * Return a single normally distributed double with mean \p 0.0 and + * standard deviation \p 1.0 from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * The implementation uses the inverse cumulative distribution function + * to generate normally distributed results. + * + * \param state - Pointer to state to update + * + * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0 + */ +QUALIFIERS double curand_normal_double(curandStateSobol64_t *state) +{ + return _curand_normal_icdf_double(curand(state)); +} + +/** + * \brief Return a normally distributed double from a scrambled Sobol64 generator. + * + * Return a single normally distributed double with mean \p 0.0 and + * standard deviation \p 1.0 from the scrambled Sobol64 generator in \p state, + * increment position of generator by one. + * + * The implementation uses the inverse cumulative distribution function + * to generate normally distributed results. + * + * \param state - Pointer to state to update + * + * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0 + */ +QUALIFIERS double curand_normal_double(curandStateScrambledSobol64_t *state) +{ + return _curand_normal_icdf_double(curand(state)); +} +#endif // !defined(CURAND_NORMAL_H_) diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h new file mode 100644 index 0000000000000000000000000000000000000000..f731101c22504b9cb68c1c5694d6087bc66df18a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h @@ -0,0 +1,134 @@ + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+#ifndef CURAND_NORMAL_STATIC_H
+#define CURAND_NORMAL_STATIC_H
+
+#define QUALIFIERS_STATIC __host__ __device__ __forceinline__
+
+#include <nv/target>
+#if defined(HOST_HAVE_ERFCINVF)
+    #define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) _NV_BLOCK_EXPAND(t)
+#else
+    #define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, t, f)
+#endif
+
+QUALIFIERS_STATIC float _curand_normal_icdf(unsigned int x)
+{
+IF_DEVICE_OR_HAVE_ERFCINVF(
+    float s = CURAND_SQRT2;
+    // Mirror to avoid loss of precision
+    if(x > 0x80000000UL) {
+        x = 0xffffffffUL - x;
+        s = -s;
+    }
+    float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+    // p is in (0, 0.5], 2p is in (0, 1]
+    return s * erfcinvf(2.0f * p);
+,
+    x++; //suppress warnings
+    return 0.0f;
+)
+}
+
+QUALIFIERS_STATIC float _curand_normal_icdf(unsigned long long x)
+{
+IF_DEVICE_OR_HAVE_ERFCINVF(
+    unsigned int t = (unsigned int)(x >> 32);
+    float s = CURAND_SQRT2;
+    // Mirror to avoid loss of precision
+    if(t > 0x80000000UL) {
+        t = 0xffffffffUL - t;
+        s = -s;
+    }
+    float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+    // p is in (0, 0.5], 2p is in (0, 1]
+    return s * erfcinvf(2.0f * p);
+,
+    x++;
+    return 0.0f;
+)
+}
+
+QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned int x)
+{
+IF_DEVICE_OR_HAVE_ERFCINVF(
+    double s = CURAND_SQRT2_DOUBLE;
+    // Mirror to avoid loss of precision
+    if(x > 0x80000000UL) {
+        x = 0xffffffffUL - x;
+        s = -s;
+    }
+    double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
+    // p is in (0, 0.5], 2p is in (0, 1]
+    return s * erfcinv(2.0 * p);
+,
+    x++;
+    return 0.0;
+)
+}
+
+QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned long long x)
+{
+IF_DEVICE_OR_HAVE_ERFCINVF(
+    double s = CURAND_SQRT2_DOUBLE;
+    x >>= 11;
+    // Mirror to avoid loss of precision
+    if(x > 0x10000000000000UL) {
+        x = 0x1fffffffffffffUL - x;
+        s = -s;
+    }
+    double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
+    // p is in (0, 0.5], 2p is in (0, 1]
+    return s * erfcinv(2.0 * p);
+,
+    x++;
+    return 0.0;
+)
+}
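+/*
+ * [Worked equation added by the editor; not part of the original header.]
+ * All four helpers above implement the same quantile mapping.  A k-bit
+ * integer x is first placed at a bin midpoint in (0, 1),
+ *     u = x * 2^-k + 2^-(k+1),
+ * then mirrored to p = min(u, 1 - u) in (0, 0.5] with a matching sign s.
+ * Because the standard normal quantile satisfies
+ *     Phi^{-1}(1 - p) = sqrt(2) * erfcinv(2p),
+ * the returned value s * sqrt(2) * erfcinv(2p) is N(0,1)-distributed; the
+ * mirror step keeps erfcinv's argument well inside (0, 1], so both tails
+ * are resolved with full precision.
+ */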
+#undef QUALIFIERS_STATIC
+#endif
diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h
new file mode 100644
index 0000000000000000000000000000000000000000..7881194cb868be14a197b581d7c82bf9dd16d617
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h
@@ -0,0 +1,763 @@
+
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+
+#if !defined(CURAND_POISSON_H_)
+#define CURAND_POISSON_H_
+
+/**
+ * \defgroup DEVICE Device API
+ *
+ * @{
+ */
+
+#ifndef __CUDACC_RTC__
+#include <math.h>
+#endif // __CUDACC_RTC__
+
+#include <nv/target>
+
+#include "curand_mrg32k3a.h"
+#include "curand_mtgp32_kernel.h"
+#include "curand_philox4x32_x.h"
+
+#define CR_CUDART_PI 3.1415926535897931e+0
+#define CR_CUDART_TWO_TO_52 4503599627370496.0
+
+
+QUALIFIERS float __cr_rsqrt(float a)
+{
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    asm ("rsqrt.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
+,
+    a = 1.0f / sqrtf (a);
+)
+    return a;
+}
+
+QUALIFIERS float __cr_exp (float a)
+{
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    a = a * 1.4426950408889634074;  // log2(e): ex2 computes 2^x, so exp(a) = 2^(a*log2(e))
+    asm ("ex2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
+,
+    a = expf (a);
+)
+    return a;
+}
+
+QUALIFIERS float __cr_log (float a)
+{
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    asm ("lg2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
+    a = a * 0.69314718055994530942;  // ln(2): lg2 computes log2(a), so log(a) = log2(a)*ln(2)
+,
+    a = logf (a);
+)
+    return a;
+}
+
+QUALIFIERS float __cr_rcp (float a)
+{
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+    asm ("rcp.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
+,
+    a = 1.0f / a;
+)
+    return a;
+}
+
+/* Computes regularized gamma function: gammainc(a,x)/gamma(a) */
+QUALIFIERS float __cr_pgammainc (float a, float x)
+{
+    float t, alpha, beta;
+
+    /* First level parametrization constants */
+    float ma1 = 1.43248035075540910f,
+          ma2 = 0.12400979329415655f,
+          ma3 = 0.00025361074907033f,
+          mb1 = 0.21096734870196546f,
+          mb2 = 1.97381164089999420f,
+          mb3 = 0.94201734077887530f;
+
+    /* Second level parametrization constants (depends only on a) */
+
+    alpha = __cr_rsqrt (a - ma2);
+    alpha = ma1 * alpha + ma3;
+    beta = __cr_rsqrt (a - mb2);
+    beta = mb1 * beta + mb3;
+
+    /* Final approximation (depends on a and x) */
+
+    t = a - x;
+    t = alpha * t - beta;
+    t = 1.0f + __cr_exp (t);
+    t = t * t;
+    t = __cr_rcp (t);
+
+    /* Negative a,x or a,x=NAN requires special handling */
+    //t = !(x > 0 && a >= 0) ? 0.0 : t;
+
+    return t;
+}
+
+/* Computes inverse of pgammainc */
+QUALIFIERS float __cr_pgammaincinv (float a, float y)
+{
+    float t, alpha, beta;
+
+    /* First level parametrization constants */
+
+    float ma1 = 1.43248035075540910f,
+          ma2 = 0.12400979329415655f,
+          ma3 = 0.00025361074907033f,
+          mb1 = 0.21096734870196546f,
+          mb2 = 1.97381164089999420f,
+          mb3 = 0.94201734077887530f;
+
+    /* Second level parametrization constants (depends only on a) */
+
+    alpha = __cr_rsqrt (a - ma2);
+    alpha = ma1 * alpha + ma3;
+    beta = __cr_rsqrt (a - mb2);
+    beta = mb1 * beta + mb3;
+
+    /* Final approximation (depends on a and y) */
+
+    t = __cr_rsqrt (y) - 1.0f;
+    t = __cr_log (t);
+    t = beta + t;
+    t = - t * __cr_rcp (alpha) + a;
+    /* Negative a,x or a,x=NAN requires special handling */
+    //t = !(y > 0 && a >= 0) ? 0.0 : t;
+    return t;
+}
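+/*
+ * [Worked equation added by the editor; not part of the original header.]
+ * Reading off the arithmetic above: with alpha(a) and beta(a) as computed
+ * from the first-level constants, __cr_pgammainc approximates the
+ * regularized incomplete gamma function by a squared logistic curve,
+ *
+ *     P(a, x) ~= ( 1 + exp(alpha*(a - x) - beta) )^(-2),
+ *
+ * and __cr_pgammaincinv is its exact algebraic inverse in x: solving
+ * y = (1 + e^t)^(-2) for t = alpha*(a - x) - beta gives
+ *
+ *     x ~= a - ( beta + log(1/sqrt(y) - 1) ) / alpha,
+ *
+ * which is precisely the rsqrt/log/rcp sequence in __cr_pgammaincinv.
+ */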
+#if defined(__CUDACC_RDC__) && (__cplusplus >= 201703L) && defined(__cpp_inline_variables)
+inline __constant__ double __cr_lgamma_table [] = {
+#else
+static __constant__ double __cr_lgamma_table [] = {
+#endif
+    0.000000000000000000e-1,
+    0.000000000000000000e-1,
+    6.931471805599453094e-1,
+    1.791759469228055001e0,
+    3.178053830347945620e0,
+    4.787491742782045994e0,
+    6.579251212010100995e0,
+    8.525161361065414300e0,
+    1.060460290274525023e1
+};
+
+
+QUALIFIERS double __cr_lgamma_integer(int a)
+{
+    double s;
+    double t;
+    double fa = fabs((float)a);
+    double sum;
+
+    if (a > 8) {
+        /* Stirling approximation; coefficients from Hart et al, "Computer
+         * Approximations", Wiley 1968. Approximation 5404.
+         */
+        s = 1.0 / fa;
+        t = s * s;
+        sum = -0.1633436431e-2;
+        sum = sum * t + 0.83645878922e-3;
+        sum = sum * t - 0.5951896861197e-3;
+        sum = sum * t + 0.793650576493454e-3;
+        sum = sum * t - 0.277777777735865004e-2;
+        sum = sum * t + 0.833333333333331018375e-1;
+        sum = sum * s + 0.918938533204672;
+        s = 0.5 * log (fa);
+        t = fa - 0.5;
+        s = s * t;
+        t = s - fa;
+        s = s + sum;
+        t = t + s;
+        return t;
+    } else {
+NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+        return __cr_lgamma_table [(int) fa-1];
+,
+        switch(a) {
+            case 1: return 0.000000000000000000e-1;
+            case 2: return 0.000000000000000000e-1;
+            case 3: return 6.931471805599453094e-1;
+            case 4: return 1.791759469228055001e0;
+            case 5: return 3.178053830347945620e0;
+            case 6: return 4.787491742782045994e0;
+            case 7: return 6.579251212010100995e0;
+            case 8: return 8.525161361065414300e0;
+            default: return 1.060460290274525023e1;
+        }
+)
+    }
+}
+
+#define KNUTH_FLOAT_CONST 60.0
+template <typename T>
+// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
+QUALIFIERS unsigned int curand_poisson_knuth(T *state, float lambda)
+{
+    unsigned int k = 0;
+    float p = expf(lambda);
+    do{
+        k++;
+        p *= curand_uniform(state);
+    }while (p > 1.0);
+    return k-1;
+}
+
+template <typename T>
+// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
+QUALIFIERS uint4 curand_poisson_knuth4(T *state, float lambda)
+{
+    uint4 k = {0,0,0,0};
+    float exp_lambda = expf(lambda);
+    float4 p={ exp_lambda,exp_lambda,exp_lambda,exp_lambda };
+    do{
+        k.x++;
+        p.x *= curand_uniform(state);
+    }while (p.x > 1.0);
+    do{
+        k.y++;
+        p.y *= curand_uniform(state);
+    }while (p.y > 1.0);
+    do{
+        k.z++;
+        p.z *= curand_uniform(state);
+    }while (p.z > 1.0);
+    do{
+        k.w++;
+        p.w *= curand_uniform(state);
+    }while (p.w > 1.0);
+
+    k.x--;
+    k.y--;
+    k.z--;
+    k.w--;
+    return k;
+}
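+/*
+ * [Illustrative sketch added by the editor; not part of the original
+ * header.]  The textbook form of Knuth's algorithm multiplies uniforms
+ * until the running product drops below exp(-lambda):
+ */
+template <typename T>
+QUALIFIERS unsigned int _curand_poisson_knuth_sketch(T *state, float lambda)
+{
+    float L = expf(-lambda);        /* target threshold */
+    float p = 1.0f;
+    unsigned int k = 0;
+    do {
+        k++;
+        p *= curand_uniform(state); /* shrink the product by one uniform */
+    } while (p > L);
+    return k - 1;                   /* expected cost grows linearly in lambda */
+}
+/*
+ * curand_poisson_knuth above is algebraically identical: it starts from
+ * p = expf(lambda) and compares against 1.0, which keeps the running
+ * product in the range (1, e^lambda] instead of [e^-lambda, 1) and needs
+ * no separate exp(-lambda) evaluation.
+ */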
+template <typename T>
+// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
+QUALIFIERS unsigned int _curand_M2_double(T x, curandDistributionM2Shift_t distributionM2)
+{
+    double u = _curand_uniform_double(x);
+    int j = (int) floor(distributionM2->length*u);
+
+    double histogramVj;
+    unsigned int histogramKj;
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
+    histogramVj = __ldg( &(distributionM2->histogram->V[j]));
+    histogramKj = __ldg( &(distributionM2->histogram->K[j]));
+,
+    histogramVj = distributionM2->histogram->V[j];
+    histogramKj = distributionM2->histogram->K[j];
+)
+    //if (u < distributionM2->histogram->V[j]) return distributionM2->shift + j;
+    //return distributionM2->shift + distributionM2->histogram->K[j];
+    if (u < histogramVj) return distributionM2->shift + j;
+    return distributionM2->shift + histogramKj;
+}
+
+template <typename T>
+// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
+QUALIFIERS uint4 _curand_M2_double4(T x, curandDistributionM2Shift_t distributionM2)
+{
+    double4 u;
+    uint4 result = {0,0,0,0};
+    int4 flag = {1,1,1,1};
+
+    u.x = _curand_uniform_double(x.x);
+    u.y = _curand_uniform_double(x.y);
+    u.z = _curand_uniform_double(x.z);
+    u.w = _curand_uniform_double(x.w);
+
+    int4 j;
+    j.x = (int) floor(distributionM2->length*u.x);
+    j.y = (int) floor(distributionM2->length*u.y);
+    j.z = (int) floor(distributionM2->length*u.z);
+    j.w = (int) floor(distributionM2->length*u.w);
+
+    double histogramVjx;
+    double histogramVjy;
+    double histogramVjz;
+    double histogramVjw;
+    unsigned int histogramKjx;
+    unsigned int histogramKjy;
+    unsigned int histogramKjz;
+    unsigned int histogramKjw;
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
+    histogramVjx = __ldg( &(distributionM2->histogram->V[j.x]));
+    histogramVjy = __ldg( &(distributionM2->histogram->V[j.y]));
+    histogramVjz = __ldg( &(distributionM2->histogram->V[j.z]));
+    histogramVjw = __ldg( &(distributionM2->histogram->V[j.w]));
+
+    histogramKjx = __ldg( &(distributionM2->histogram->K[j.x]));
+    histogramKjy = __ldg( &(distributionM2->histogram->K[j.y]));
+    histogramKjz = __ldg( &(distributionM2->histogram->K[j.z]));
+    histogramKjw = __ldg( &(distributionM2->histogram->K[j.w]));
+,
+    histogramVjx = distributionM2->histogram->V[j.x];
+    histogramVjy = distributionM2->histogram->V[j.y];
+    histogramVjz = distributionM2->histogram->V[j.z];
+    histogramVjw = distributionM2->histogram->V[j.w];
+
+    histogramKjx = distributionM2->histogram->K[j.x];
+    histogramKjy = distributionM2->histogram->K[j.y];
+    histogramKjz = distributionM2->histogram->K[j.z];
+    histogramKjw = distributionM2->histogram->K[j.w];
+)
+
+    if (u.x < histogramVjx){ result.x = distributionM2->shift + j.x; flag.x = 0; }
+    if (u.y < histogramVjy){ result.y = distributionM2->shift + j.y; flag.y = 0; }
+    if (u.z < histogramVjz){ result.z = distributionM2->shift + j.z; flag.z = 0; }
+    if (u.w < histogramVjw){ result.w = distributionM2->shift + j.w; flag.w = 0; }
+    //return distributionM2->shift + distributionM2->histogram->K[j];
+
+    if(flag.x) result.x = distributionM2->shift + histogramKjx;
+    if(flag.y) result.y = distributionM2->shift + histogramKjy;
+    if(flag.z) result.z = distributionM2->shift + histogramKjz;
+    if(flag.w) result.w = distributionM2->shift + histogramKjw;
+
+    return result;
+}
+
+template <typename STATE>
+QUALIFIERS unsigned int curand_M2_double(STATE *state, curandDistributionM2Shift_t distributionM2)
+{
+    return _curand_M2_double(curand(state), distributionM2);
+}
+
+template <typename STATE>
+QUALIFIERS uint4 curand_M2_double4(STATE *state, curandDistributionM2Shift_t distributionM2)
+{
+    return _curand_M2_double4(curand4(state), distributionM2);
+}
+
+
+template <typename T>
+QUALIFIERS unsigned int _curand_binary_search_double(T x, curandDistributionShift_t distribution)
+{
+    double u = _curand_uniform_double(x);
+    int min = 0;
+    int max = distribution->length-1;
+    do{
+        int mid = (max + min)/2;
+        double probability_mid;
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
+        probability_mid = __ldg( &(distribution->probability[mid]));
+,
+        probability_mid = distribution->probability[mid];
+)
+        if (u <= probability_mid){
+            max = mid;
+        }else{
+            min = mid+1;
+        }
+    }while (min < max);
+    return distribution->shift + min;
+}
+
+template <typename STATE>
+QUALIFIERS unsigned int curand_binary_search_double(STATE *state, curandDistributionShift_t distribution)
+{
+    return _curand_binary_search_double(curand(state), distribution);
+}
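+/*
+ * [Illustrative sketch added by the editor; not part of the original
+ * header.]  Stripped of the device-specific __ldg() load, the search
+ * above is a textbook lower bound over a nondecreasing CDF table: find
+ * the smallest index whose cumulative probability is >= u.
+ */
+QUALIFIERS unsigned int _binary_search_cdf_sketch(const double *cdf, int length, double u)
+{
+    int min = 0;
+    int max = length - 1;
+    while (min < max) {
+        int mid = (max + min) / 2;
+        if (u <= cdf[mid])
+            max = mid;      /* answer is mid or to its left     */
+        else
+            min = mid + 1;  /* answer is strictly to the right  */
+    }
+    return (unsigned int)min;
+}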
+// Generates uniformly distributed double values in range (0.0; 1.0) from uniformly distributed
+// unsigned int. We can't use standard _curand_uniform_double since it can generate 1.0.
+// This is required only for _curand_poisson_ITR_double.
+QUALIFIERS double _curand_uniform_double_excluding_one(unsigned int x)
+{
+    return x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
+}
+
+// Overload for unsigned long long.
+// This is required only for _curand_poisson_ITR_double.
+QUALIFIERS double _curand_uniform_double_excluding_one(unsigned long long x)
+{
+    return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/4.0);
+}
+
+#define MAGIC_DOUBLE_CONST 500.0
+template <typename T>
+//George S. Fishman Discrete-event simulation: modeling, programming, and analysis
+QUALIFIERS unsigned int _curand_poisson_ITR_double(T x, double lambda)
+{
+    double L,p = 1.0;
+    double q = 1.0;
+    unsigned int k = 0;
+    int pow=0;
+    // This algorithm requires u to be in (0;1) range, however, _curand_uniform_double
+    // returns a number in range (0;1]. If u is 1.0 the inner loop never ends. The
+    // following operation transforms the range from (0;1] to (0;1).
+    double u = _curand_uniform_double_excluding_one(x);
+    do{
+        if (lambda > (double)(pow+MAGIC_DOUBLE_CONST)){
+            L = exp(-MAGIC_DOUBLE_CONST);
+        }else{
+            L = exp((double)(pow - lambda));
+        }
+        p *= L;
+        q *= L;
+        pow += (int) MAGIC_DOUBLE_CONST;
+        while (u > q){
+            k++;
+            p *= ((double)lambda / (double) k);
+            q += p;
+        }
+    }while((double)pow < lambda);
+    return k;
+}
+
+template <typename T>
+/* Rejection Method for Poisson distribution based on gammainc approximation */
+QUALIFIERS unsigned int curand_poisson_gammainc(T state, float lambda){
+    float y, x, t, z, v;
+    float logl = __cr_log (lambda);
+    while (true) {
+        y = curand_uniform (state);
+        x = __cr_pgammaincinv (lambda, y);
+        x = floorf (x);
+        z = curand_uniform (state);
+        v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
+        z = z*v;
+        t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
+        if ((z < t) && (v>=1e-20))
+            break;
+    }
+    return (unsigned int)x;
+}
+
+template <typename T>
+/* Rejection Method for Poisson distribution based on gammainc approximation */
+QUALIFIERS uint4 curand_poisson_gammainc4(T state, float lambda){
+    uint4 result;
+    float y, x, t, z, v;
+    float logl = __cr_log (lambda);
+    while (true) {
+        y = curand_uniform (state);
+        x = __cr_pgammaincinv (lambda, y);
+        x = floorf (x);
+        z = curand_uniform (state);
+        v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
+        z = z*v;
+        t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
+        if ((z < t) && (v>=1e-20))
+            break;
+    }
+    result.x = (unsigned int)x;
+
+    while (true) {
+        y = curand_uniform (state);
+        x = __cr_pgammaincinv (lambda, y);
+        x = floorf (x);
+        z = curand_uniform (state);
+        v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
+        z = z*v;
+        t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
+        if ((z < t) && (v>=1e-20))
+            break;
+    }
+    result.y = (unsigned int)x;
+
+    while (true) {
+        y = curand_uniform (state);
+        x = __cr_pgammaincinv (lambda, y);
+        x = floorf (x);
+        z = curand_uniform (state);
+        v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
+        z = z*v;
+        t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
+        if ((z < t) && (v>=1e-20))
+            break;
+    }
+    result.z = (unsigned int)x;
+
+    while (true) {
+        y = curand_uniform (state);
+        x = __cr_pgammaincinv (lambda, y);
+        x = floorf (x);
+        z = curand_uniform (state);
+        v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
+        z = z*v;
+        t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
+        if ((z < t) && (v>=1e-20))
+            break;
+    }
+    result.w = (unsigned int)x;
+
+    return result;
+}
+// Note below that the round to nearest integer, where needed, is done in line with code that
+// assumes the range of values is < 2**32
+
+template <typename T>
+QUALIFIERS unsigned int _curand_poisson(T x, double lambda)
+{
+    if (lambda < 1000)
+        return _curand_poisson_ITR_double(x, lambda);
+    return (unsigned int)((sqrt(lambda) * _curand_normal_icdf_double(x)) + lambda + 0.5); //Round to nearest
+}
+
+template <typename T>
+QUALIFIERS unsigned int _curand_poisson_from_normal(T x, double lambda)
+{
+    return (unsigned int)((sqrt(lambda) * _curand_normal_icdf(x)) + lambda + 0.5); //Round to nearest
+}
+
+template <typename STATE>
+QUALIFIERS unsigned int curand_poisson_from_normal(STATE state, double lambda)
+{
+    return (unsigned int)((sqrt(lambda) * curand_normal(state)) + lambda + 0.5); //Round to nearest
+}
+
+template <typename STATE>
+QUALIFIERS uint4 curand_poisson_from_normal4(STATE state, double lambda)
+{
+    uint4 result;
+    float4 _res;
+
+    _res = curand_normal4(state);
+
+    result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
+    result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
+    result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
+    result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
+    return result;
+}
+
+/**
+ * \brief Return a Poisson-distributed unsigned int from an XORWOW generator.
+ *
+ * Return a single unsigned int from a Poisson
+ * distribution with lambda \p lambda from the XORWOW generator in \p state,
+ * increment the position of the generator by a variable amount, depending
+ * on the algorithm used.
+ *
+ * \param state - Pointer to state to update
+ * \param lambda - Lambda of the Poisson distribution
+ *
+ * \return Poisson-distributed unsigned int with lambda \p lambda
+ */
+QUALIFIERS unsigned int curand_poisson(curandStateXORWOW_t *state, double lambda)
+{
+    if (lambda < 64)
+        return curand_poisson_knuth(state, (float)lambda);
+    if (lambda > 4000)
+        return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
+    return curand_poisson_gammainc(state, (float)lambda);
+}
+
+/**
+ * \brief Return a Poisson-distributed unsigned int from a Philox4_32_10 generator.
+ *
+ * Return a single unsigned int from a Poisson
+ * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
+ * increment the position of the generator by a variable amount, depending
+ * on the algorithm used.
+ *
+ * \param state - Pointer to state to update
+ * \param lambda - Lambda of the Poisson distribution
+ *
+ * \return Poisson-distributed unsigned int with lambda \p lambda
+ */
+QUALIFIERS unsigned int curand_poisson(curandStatePhilox4_32_10_t *state, double lambda)
+{
+    if (lambda < 64)
+        return curand_poisson_knuth(state, (float)lambda);
+    if (lambda > 4000)
+        return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
+    return curand_poisson_gammainc(state, (float)lambda);
+}
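+/*
+ * [Editor's summary, derived from the branches above.]  The scalar
+ * curand_poisson() overloads for the pseudorandom generators all share
+ * one lambda-based dispatch:
+ *
+ *     lambda <  64  : Knuth product-of-uniforms (cheap for small means)
+ *     64 .. 4000    : rejection against the gammainc approximation
+ *     lambda > 4000 : normal approximation,
+ *                     round(sqrt(lambda) * N(0,1) + lambda),
+ *
+ * in line with the note above that the rounded result is assumed to
+ * stay below 2**32.
+ */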
+/**
+ * \brief Return four Poisson-distributed unsigned ints from a Philox4_32_10 generator.
+ *
+ * Return four unsigned ints from a Poisson
+ * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
+ * increment the position of the generator by a variable amount, depending
+ * on the algorithm used.
+ *
+ * \param state - Pointer to state to update
+ * \param lambda - Lambda of the Poisson distribution
+ *
+ * \return Four Poisson-distributed unsigned ints with lambda \p lambda
+ */
+QUALIFIERS uint4 curand_poisson4(curandStatePhilox4_32_10_t *state, double lambda)
+{
+    uint4 result;
+    double4 _res;
+    if (lambda < 64)
+        return curand_poisson_knuth4(state, (float)lambda);
+    if (lambda > 4000) {
+        _res = curand_normal4_double(state);
+        result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
+        result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
+        result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
+        result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
+        return result;
+    }
+    return curand_poisson_gammainc4(state, (float)lambda);
+}
+
+
+
+/**
+ * \brief Return a Poisson-distributed unsigned int from an MRG32k3a generator.
+ *
+ * Return a single unsigned int from a Poisson
+ * distribution with lambda \p lambda from the MRG32k3a generator in \p state,
+ * increment the position of the generator by a variable amount, depending
+ * on the algorithm used.
+ *
+ * \param state - Pointer to state to update
+ * \param lambda - Lambda of the Poisson distribution
+ *
+ * \return Poisson-distributed unsigned int with lambda \p lambda
+ */
+QUALIFIERS unsigned int curand_poisson(curandStateMRG32k3a_t *state, double lambda)
+{
+    if (lambda < 64)
+        return curand_poisson_knuth(state, (float)lambda);
+    if (lambda > 4000)
+        return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
+    return curand_poisson_gammainc(state, (float)lambda);
+}
+
+/**
+ * \brief Return a Poisson-distributed unsigned int from an MTGP32 generator.
+ *
+ * Return a single unsigned int from a Poisson
+ * distribution with lambda \p lambda from the MTGP32 generator in \p state,
+ * increment the position of the generator by one.
+ *
+ * \param state - Pointer to state to update
+ * \param lambda - Lambda of the Poisson distribution
+ *
+ * \return Poisson-distributed unsigned int with lambda \p lambda
+ */
+QUALIFIERS unsigned int curand_poisson(curandStateMtgp32_t *state, double lambda)
+{
+    return _curand_poisson(curand(state), lambda);
+}
+
+/**
+ * \brief Return a Poisson-distributed unsigned int from a Sobol32 generator.
+ *
+ * Return a single unsigned int from a Poisson
+ * distribution with lambda \p lambda from the Sobol32 generator in \p state,
+ * increment the position of the generator by one.
+ *
+ * \param state - Pointer to state to update
+ * \param lambda - Lambda of the Poisson distribution
+ *
+ * \return Poisson-distributed unsigned int with lambda \p lambda
+ */
+
+QUALIFIERS unsigned int curand_poisson(curandStateSobol32_t *state, double lambda)
+{
+    return _curand_poisson(curand(state), lambda);
+}
+
+/**
+ * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol32 generator.
+ *
+ * Return a single unsigned int from a Poisson
+ * distribution with lambda \p lambda from the scrambled Sobol32 generator in \p state,
+ * increment the position of the generator by one.
+ * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol32_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a Sobol64 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateSobol64_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol64 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the scrambled Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol64_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} +#endif // !defined(CURAND_POISSON_H_) diff --git a/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h new file mode 100644 index 0000000000000000000000000000000000000000..7a4af8afa328c186d9ea33a8c8226e19aba4793e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h @@ -0,0 +1,498 @@ + + /* Copyright 2010-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+
+#if !defined(CURAND_UNIFORM_H_)
+#define CURAND_UNIFORM_H_
+
+/**
+ * \defgroup DEVICE Device API
+ *
+ * @{
+ */
+
+#ifndef __CUDACC_RTC__
+#include <math.h>
+#endif // __CUDACC_RTC__
+
+#include "curand_mrg32k3a.h"
+#include "curand_mtgp32_kernel.h"
+#include "curand_philox4x32_x.h"
+
+
+QUALIFIERS float _curand_uniform(unsigned int x)
+{
+    return x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+}
+
+QUALIFIERS float4 _curand_uniform4(uint4 x)
+{
+    float4 y;
+    y.x = x.x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+    y.y = x.y * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+    y.z = x.z * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+    y.w = x.w * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+    return y;
+}
+
+QUALIFIERS float _curand_uniform(unsigned long long x)
+{
+    unsigned int t;
+    t = (unsigned int)(x >> 32);
+    return t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
+}
+
+QUALIFIERS double _curand_uniform_double(unsigned int x)
+{
+    return x * CURAND_2POW32_INV_DOUBLE + CURAND_2POW32_INV_DOUBLE;
+}
+
+QUALIFIERS double _curand_uniform_double(unsigned long long x)
+{
+    return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
+}
+
+QUALIFIERS double _curand_uniform_double_hq(unsigned int x, unsigned int y)
+{
+    unsigned long long z = (unsigned long long)x ^
+        ((unsigned long long)y << (53 - 32));
+    return z * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
+}
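+/*
+ * [Worked equation added by the editor; not part of the original header.]
+ * _curand_uniform_double_hq() builds a 53-bit sample from two 32-bit
+ * draws: x supplies the low 32 bits, and y, shifted left by 53 - 32 = 21,
+ * supplies the high bits, so z covers [0, 2^53).  The result
+ *
+ *     z * 2^-53 + 2^-54
+ *
+ * is a bin midpoint, which after floating-point rounding matches the
+ * documented output range that excludes 0.0 and includes 1.0.  The
+ * single-draw _curand_uniform_double(unsigned int) above trades this
+ * for only 32 random bits of resolution.
+ */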
+QUALIFIERS float curand_uniform(curandStateTest_t *state)
+{
+    return _curand_uniform(curand(state));
+}
+
+QUALIFIERS double curand_uniform_double(curandStateTest_t *state)
+{
+    return _curand_uniform_double(curand(state));
+}
+
+/**
+ * \brief Return a uniformly distributed float from an XORWOW generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the XORWOW generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation may use any number of calls to \p curand() to
+ * get enough random bits to create the return value. The current
+ * implementation uses one call.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_uniform(curandStateXORWOW_t *state)
+{
+    return _curand_uniform(curand(state));
+}
+
+/**
+ * \brief Return a uniformly distributed double from an XORWOW generator.
+ *
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
+ * from the XORWOW generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation may use any number of calls to \p curand() to
+ * get enough random bits to create the return value. The current
+ * implementation uses exactly two calls.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
+ */
+QUALIFIERS double curand_uniform_double(curandStateXORWOW_t *state)
+{
+    unsigned int x, y;
+    x = curand(state);
+    y = curand(state);
+    return _curand_uniform_double_hq(x, y);
+}
+/**
+ * \brief Return a uniformly distributed float from an MRG32k3a generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the MRG32k3a generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation returns up to 23 bits of mantissa, with the minimum
+ * return value \f$ 2^{-32} \f$.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_uniform(curandStateMRG32k3a_t *state)
+{
+    return ((float)(curand_MRG32k3a(state)*MRG32K3A_NORM));
+}
+
+/**
+ * \brief Return a uniformly distributed double from an MRG32k3a generator.
+ *
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
+ * from the MRG32k3a generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * Note the implementation returns at most 32 random bits of mantissa as
+ * outlined in the seminal paper by L'Ecuyer.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
+ */
+QUALIFIERS double curand_uniform_double(curandStateMRG32k3a_t *state)
+{
+    return curand_MRG32k3a(state)*MRG32K3A_NORM;
+}
+
+
+
+/**
+ * \brief Return a uniformly distributed tuple of 2 doubles from a Philox4_32_10 generator.
+ *
+ * Return two uniformly distributed doubles (as a double2) between \p 0.0 and \p 1.0
+ * from the Philox4_32_10 generator in \p state, increment position of generator by 4.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return 2 uniformly distributed doubles between \p 0.0 and \p 1.0
+ */
+
+QUALIFIERS double2 curand_uniform2_double(curandStatePhilox4_32_10_t *state)
+{
+    uint4 _x;
+    double2 result;
+    _x = curand4(state);
+    result.x = _curand_uniform_double_hq(_x.x,_x.y);
+    result.y = _curand_uniform_double_hq(_x.z,_x.w);
+    return result;
+}
+
+
+// not a part of API
+QUALIFIERS double4 curand_uniform4_double(curandStatePhilox4_32_10_t *state)
+{
+    uint4 _x, _y;
+    double4 result;
+    _x = curand4(state);
+    _y = curand4(state);
+    result.x = _curand_uniform_double_hq(_x.x,_x.y);
+    result.y = _curand_uniform_double_hq(_x.z,_x.w);
+    result.z = _curand_uniform_double_hq(_y.x,_y.y);
+    result.w = _curand_uniform_double_hq(_y.z,_y.w);
+    return result;
+}
+
+/**
+ * \brief Return a uniformly distributed float from a Philox4_32_10 generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the Philox4_32_10 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ *
+ */
+QUALIFIERS float curand_uniform(curandStatePhilox4_32_10_t *state)
+{
+    return _curand_uniform(curand(state));
+}
+
+/**
+ * \brief Return a uniformly distributed tuple of 4 floats from a Philox4_32_10 generator.
+ *
+ * Return 4 uniformly distributed floats between \p 0.0f and \p 1.0f
+ * from the Philox4_32_10 generator in \p state, increment position of generator by 4.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return 4 uniformly distributed floats between \p 0.0f and \p 1.0f
+ *
+ */
+QUALIFIERS float4 curand_uniform4(curandStatePhilox4_32_10_t *state)
+{
+    return _curand_uniform4(curand4(state));
+}
+
+/**
+ * \brief Return a uniformly distributed float from an MTGP32 generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the MTGP32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_uniform(curandStateMtgp32_t *state)
+{
+    return _curand_uniform(curand(state));
+}
+/**
+ * \brief Return a uniformly distributed double from an MTGP32 generator.
+ *
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
+ * from the MTGP32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * Note that the implementation uses only 32 random bits to generate a single double
+ * precision value.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
+ */
+QUALIFIERS double curand_uniform_double(curandStateMtgp32_t *state)
+{
+    return _curand_uniform_double(curand(state));
+}
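+/*
+ * [Illustrative usage sketch added by the editor; not part of the
+ * original header.]  A typical consumer of the Philox overloads above:
+ * each thread initializes its own state (same seed, per-thread
+ * subsequence, zero offset) and draws four uniforms per call.
+ */
+#if 0   /* example only; not compiled as part of the header */
+#include <curand_kernel.h>
+
+__global__ void fill_uniform4(float *out, unsigned long long seed, int n4)
+{
+    int tid = blockIdx.x * blockDim.x + threadIdx.x;
+    if (tid < n4) {
+        curandStatePhilox4_32_10_t state;
+        curand_init(seed, tid, 0, &state);   /* seed, subsequence, offset */
+        float4 r = curand_uniform4(&state);  /* advances the generator by 4 */
+        out[4*tid + 0] = r.x;
+        out[4*tid + 1] = r.y;
+        out[4*tid + 2] = r.z;
+        out[4*tid + 3] = r.w;
+    }
+}
+#endif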
+/**
+ * \brief Return a uniformly distributed double from a Philox4_32_10 generator.
+ *
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
+ * from the Philox4_32_10 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * Note that the implementation uses only 32 random bits to generate a single double
+ * precision value.
+ *
+ * \p curand_uniform2_double() is recommended for higher quality uniformly distributed
+ * double precision values.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
+ */
+
+QUALIFIERS double curand_uniform_double(curandStatePhilox4_32_10_t *state)
+{
+    return _curand_uniform_double(curand(state));
+}
+
+
+/**
+ * \brief Return a uniformly distributed float from a Sobol32 generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the Sobol32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation is guaranteed to use a single call to \p curand().
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_uniform(curandStateSobol32_t *state)
+{
+    return _curand_uniform(curand(state));
+}
+
+/**
+ * \brief Return a uniformly distributed double from a Sobol32 generator.
+ *
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
+ * from the Sobol32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation is guaranteed to use a single call to \p curand()
+ * to preserve the quasirandom properties of the sequence.
+ *
+ * Note that the implementation uses only 32 random bits to generate a single double
+ * precision value.
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
+ */
+QUALIFIERS double curand_uniform_double(curandStateSobol32_t *state)
+{
+    return _curand_uniform_double(curand(state));
+}
+/**
+ * \brief Return a uniformly distributed float from a scrambled Sobol32 generator.
+ *
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
+ * from the scrambled Sobol32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation is guaranteed to use a single call to \p curand().
+ *
+ * \param state - Pointer to state to update
+ *
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
+ */
+QUALIFIERS float curand_uniform(curandStateScrambledSobol32_t *state)
+{
+    return _curand_uniform(curand(state));
+}
+
+/**
+ * \brief Return a uniformly distributed double from a scrambled Sobol32 generator.
+ *
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
+ * from the scrambled Sobol32 generator in \p state, increment position of generator.
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
+ * point outputs are never returned.
+ *
+ * The implementation is guaranteed to use a single call to \p curand()
+ * to preserve the quasirandom properties of the sequence.
+ * + * Note that the implementation uses only 32 random bits to generate a single double + * precision value. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateScrambledSobol32_t *state) +{ + return _curand_uniform_double(curand(state)); +} +/** + * \brief Return a uniformly distributed float from a Sobol64 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand(). + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateSobol64_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a Sobol64 generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand() + * to preserve the quasirandom properties of the sequence. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateSobol64_t *state) +{ + return _curand_uniform_double(curand(state)); +} +/** + * \brief Return a uniformly distributed float from a scrambled Sobol64 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the scrambled Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand(). + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateScrambledSobol64_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a scrambled Sobol64 generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the scrambled Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand() + * to preserve the quasirandom properties of the sequence. 
+ * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateScrambledSobol64_t *state) +{ + return _curand_uniform_double(curand(state)); +} + +#endif // !defined(CURAND_UNIFORM_H_) diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/AUTHORS b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..1dcaf1544d9081c65917f6230afa1c4d7c5b2dcf --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/AUTHORS @@ -0,0 +1,1210 @@ +All people who contributed to SymPy by sending at least a patch or +more (in the order of the date of their first contribution), except +those who explicitly didn't want to be mentioned. People with a * next +to their names are not found in the metadata of the git history. This +file is generated automatically by running `./bin/authors_update.py`. + +There are a total of 1202 authors. + +Ondřej Čertík +Fabian Pedregosa +Jurjen N.E. Bos +Mateusz Paprocki +*Marc-Etienne M.Leveille +Brian Jorgensen +Jason Gedge +Robert Schwarz +Pearu Peterson +Fredrik Johansson +Chris Wu +*Ulrich Hecht +Goutham Lakshminarayan +David Lawrence +Jaroslaw Tworek +David Marek +Bernhard R. Link +Andrej Tokarčík +Or Dvory +Saroj Adhikari +Pauli Virtanen +Robert Kern +James Aspnes +Nimish Telang +Abderrahim Kitouni +Pan Peng +Friedrich Hagedorn +Elrond der Elbenfuerst +Rizgar Mella +Felix Kaiser +Roberto Nobrega +David Roberts +Sebastian Krämer +Vinzent Steinberg +Riccardo Gori +Case Van Horsen +Stepan Roucka +Ali Raza Syed +Stefano Maggiolo +Robert Cimrman +Bastian Weber +Sebastian Krause +Sebastian Kreft +*Dan +Alan Bromborsky +Boris Timokhin +Robert +Andy R. Terrel +Hubert Tsang +Konrad Meyer +Henrik Johansson +Priit Laes +Freddie Witherden +Brian E. Granger +Andrew Straw +Kaifeng Zhu +Ted Horst +Andrew Docherty +Akshay Srinivasan +Aaron Meurer +Barry Wardell +Tomasz Buchert +Vinay Kumar +Johann Cohen-Tanugi +Jochen Voss +Luke Peterson +Chris Smith +Thomas Sidoti +Florian Mickler +Nicolas Pourcelot +Ben Goodrich +Toon Verstraelen +Ronan Lamy +James Abbatiello +Ryan Krauss +Bill Flynn +Kevin Goodsell +Jorn Baayen +Eh Tan +Renato Coutinho +Oscar Benjamin +Øyvind Jensen +Julio Idichekop Filho +Łukasz Pankowski +*Chu-Ching Huang +Fernando Perez +Raffaele De Feo +Christian Muise +Matt Curry +Kazuo Thow +Christian Schubert +Jezreel Ng +James Pearson +Matthew Brett +Addison Cugini +Nicholas J.S. Kinar +Harold Erbin +Thomas Dixon +Cristóvão Sousa +Andre de Fortier Smit +Mark Dewing +Alexey U. Gudchenko +Gary Kerr +Sherjil Ozair +Oleksandr Gituliar +Sean Vig +Prafullkumar P. Tale +Vladimir Perić +Tom Bachmann +Yuri Karadzhov +Vladimir Lagunov +Matthew Rocklin +Saptarshi Mandal +Gilbert Gede +Anatolii Koval +Tomo Lazovich +Pavel Fedotov +Jack McCaffery +Jeremias Yehdegho +Kibeom Kim +Gregory Ksionda +Tomáš Bambas +Raymond Wong +Luca Weihs +Shai 'Deshe' Wyborski +Thomas Wiecki +Óscar Nájera +Mario Pernici +Benjamin McDonald +Sam Magura +Stefan Krastanov +Bradley Froehle +Min Ragan-Kelley +Emma Hogan +Nikhil Sarda +Julien Rioux +Roberto Colistete, Jr. 
+Raoul Bourquin +Gert-Ludwig Ingold +Srinivas Vasudevan +Jason Moore +Miha Marolt +Tim Lahey +Luis Garcia +Matt Rajca +David Li +Alexandr Gudulin +Bilal Akhtar +Grzegorz Świrski +Matt Habel +David Ju +Nichita Utiu +Nikolay Lazarov +Steve Anton +Imran Ahmed Manzoor +Ljubiša Moćić <3rdslasher@gmail.com> +Piotr Korgul +Jim Zhang +Sam Sleight +tborisova +Chancellor Arkantos +Stepan Simsa +Tobias Lenz +Siddhanathan Shanmugam +Tiffany Zhu +Tristan Hume +Alexey Subach +Joan Creus +Geoffry Song +Puneeth Chaganti +Marcin Kostrzewa <> +Natalia Nawara +vishal +Shruti Mangipudi +Davy Mao +Swapnil Agarwal +Dhia Kennouche +jerryma1121 +Joachim Durchholz +Martin Povišer +Siddhant Jain +Kevin Hunter +Michael Mayorov +Nathan Alison +Christian Bühler +Carsten Knoll +Bharath M R +Matthias Toews +Sergiu Ivanov +Jorge E. Cardona +Sanket Agarwal +Manoj Babu K. +Sai Nikhil +Aleksandar Makelov +Sachin Irukula +Raphael Michel +Ashwini Oruganti +Andreas Klöckner +Prateek Papriwal +Arpit Goyal +Angadh Nanjangud +Comer Duncan +Jens H. Nielsen +Joseph Dougherty +Elliot Marshall +Guru Devanla +George Waksman +Alexandr Popov +Tarun Gaba +Takafumi Arakaki +Saurabh Jha +Rom le Clair +Angus Griffith <16sn6uv@gmail.com> +Timothy Reluga +Brian Stephanik +Alexander Eberspächer +Sachin Joglekar +Tyler Pirtle +Vasily Povalyaev +Colleen Lee +Matthew Hoff +Niklas Thörne +Huijun Mai +Marek Šuppa +Ramana Venkata +Prasoon Shukla +Stefen Yin +Thomas Hisch +Madeleine Ball +Mary Clark +Rishabh Dixit +Manoj Kumar +Akshit Agarwal +CJ Carey +Patrick Lacasse +Ananya H +Tarang Patel +Christopher Dembia +Benjamin Fishbein +Sean Ge +Amit Jamadagni +Ankit Agrawal +Björn Dahlgren +Christophe Saint-Jean +Demian Wassermann +Khagesh Patel +Stephen Loo +hm +Patrick Poitras +Katja Sophie Hotz +Varun Joshi +Chetna Gupta +Thilina Rathnayake +Max Hutchinson +Shravas K Rao +Matthew Tadd +Alexander Hirzel +Randy Heydon +Oliver Lee +Seshagiri Prabhu +Pradyumna +Erik Welch +Eric Nelson +Roland Puntaier +Chris Conley +Tim Swast +Dmitry Batkovich +Francesco Bonazzi +Yuriy Demidov +Rick Muller +Manish Gill +Markus Müller +Amit Saha +Jeremy +QuaBoo +Stefan van der Walt +David Joyner +Lars Buitinck +Alkiviadis G. Akritas +Vinit Ravishankar +Mike Boyle +Heiner Kirchhoffer +Pablo Puente +James Fiedler +Harsh Gupta +Tuomas Airaksinen +Paul Strickland +James Goppert +rathmann +Avichal Dayal +Paul Scott +Shipra Banga +Pramod Ch +Akshay +Buck Shlegeris +Jonathan Miller +Edward Schembor +Rajath Shashidhara +Zamrath Nizam +Aditya Shah +Rajat Aggarwal +Sambuddha Basu +Zeel Shah +Abhinav Chanda +Jim Crist +Sudhanshu Mishra +Anurag Sharma +Soumya Dipta Biswas +Sushant Hiray +Ben Lucato +Kunal Arora +Henry Gebhardt +Dammina Sahabandu +Manish Shukla +Ralph Bean +richierichrawr +John Connor +Juan Luis Cano Rodríguez +Sahil Shekhawat +Kundan Kumar +Stas Kelvich +sevaader +Dhruvesh Vijay Parikh +Venkatesh Halli +Lennart Fricke +Vlad Seghete +Shashank Agarwal +carstimon +Pierre Haessig +Maciej Baranski +Benjamin Gudehus +Faisal Anees +Mark Shoulson +Robert Johansson +Kalevi Suominen +Kaushik Varanasi +Fawaz Alazemi +Ambar Mehrotra +David P. Sanders +Peter Brady +John V. Siratt +Sarwar Chahal +Nathan Woods +Colin B. Macdonald +Marcus Näslund +Clemens Novak +Mridul Seth +Craig A. Stoudt +Raj +Mihai A. 
Ionescu +immerrr +Chai Wah Wu +Leonid Blouvshtein +Peleg Michaeli +ck Lux +zsc347 +Hamish Dickson +Michael Gallaspy +Roman Inflianskas +Duane Nykamp +Ted Dokos +Sunny Aggarwal +Victor Brebenar +Akshat Jain +Shivam Vats +Longqi Wang +Juan Felipe Osorio +Ray Cathcart +Lukas Zorich +Eric Miller +Cody Herbst +Nishith Shah +Amit Kumar +Yury G. Kudryashov +Guillaume Gay +Mihir Wadwekar +Tuan Manh Lai +Asish Panda +Darshan Chaudhary +Alec Kalinin +Ralf Stephan +Aaditya Nair +Jayesh Lahori +Harshil Goel +Luv Agarwal +Jason Ly +Lokesh Sharma +Sartaj Singh +Chris Swierczewski +Konstantin Togoi +Param Singh +Sumith Kulal +Juha Remes +Philippe Bouafia +Peter Schmidt +Jiaxing Liang +Lucas Jones +Gregory Ashton +Jennifer White +Renato Orsino +Michael Boyle +Alistair Lynn +Govind Sahai +Adam Bloomston +Kyle McDaniel +Nguyen Truong Duy +Alex Lindsay +Mathew Chong +Jason Siefken +Gaurav Dhingra +Gao, Xiang +Kevin Ventullo +mao8 +Isuru Fernando +Shivam Tyagi +Richard Otis +Rich LaSota +dustyrockpyle +Anton Akhmerov +Michael Zingale +Chak-Pong Chung +David T +Phil Ruffwind +Sebastian Koslowski +Kumar Krishna Agrawal +Dustin Gadal +João Moura +Yu Kobayashi +Shashank Kumar +Timothy Cyrus +Devyani Kota +Keval Shah +Dzhelil Rufat +Pastafarianist +Sourav Singh +Jacob Garber +Vinay Singh +GolimarOurHero +Prashant Tyagi +Matthew Davis +Tschijnmo TSCHAU +Alexander Bentkamp +Jack Kemp +Kshitij Saraogi +Thomas Baruchel +Nicolás Guarín-Zapata +Jens Jørgen Mortensen +Sampad Kumar Saha +Eva Charlotte Mayer +Laura Domine +Justin Blythe +Meghana Madhyastha +Tanu Hari Dixit +Shekhar Prasad Rajak +Aqnouch Mohammed +Arafat Dad Khan +Boris Atamanovskiy +Sam Tygier +Jai Luthra +Guo Xingjian +Sandeep Veethu +Archit Verma +Shubham Tibra +Ashutosh Saboo +Michael S. Hansen +Anish Shah +Guillaume Jacquenot +Bhautik Mavani +Michał Radwański +Jerry Li +Pablo Zubieta +Shivam Agarwal +Chaitanya Sai Alaparthi +Arihant Parsoya +Ruslan Pisarev +Akash Trehan +Nishant Nikhil +Vladimir Poluhsin +Akshay Nagar +James Brandon Milam +Abhinav Agarwal +Rishabh Daal +Sanya Khurana +Aman Deep +Aravind Reddy +Abhishek Verma +Matthew Parnell +Thomas Hickman +Akshay Siramdas +YiDing Jiang +Jatin Yadav +Matthew Thomas +Rehas Sachdeva +Michael Mueller +Srajan Garg +Prabhjot Singh +Haruki Moriguchi +Tom Gijselinck +Nitin Chaudhary +Alex Argunov +Nathan Musoke +Abhishek Garg +Dana Jacobsen +Vasiliy Dommes +Phillip Berndt +Haimo Zhang +Anthony Scopatz +bluebrook +Leonid Kovalev +Josh Burkart +Dimitra Konomi +Christina Zografou +Fiach Antaw +Langston Barrett +Krit Karan +G. D. 
McBain +Prempal Singh +Gabriel Orisaka +Matthias Bussonnier +rahuldan +Colin Marquardt +Andrew Taber +Yash Reddy +Peter Stangl +elvis-sik +Nikos Karagiannakis +Jainul Vaghasia +Dennis Meckel +Harshil Meena +Micky +Nick Curtis +Michele Zaffalon +Martha Giannoudovardi +Devang Kulshreshtha +Steph Papanik +Mohammad Sadeq Dousti +Arif Ahmed +Abdullah Javed Nesar +Lakshya Agrawal +shruti +Rohit Rango +Hong Xu +Ivan Petuhov +Alsheh +Marcel Stimberg +Alexey Pakhocmhik +Tommy Olofsson +Zulfikar +Blair Azzopardi +Danny Hermes +Sergey Pestov +Mohit Chandra +Karthik Chintapalli +Marcin Briański +andreo +Flamy Owl +Yicong Guo +Varun Garg +Rishabh Madan +Aditya Kapoor +Karan Sharma +Vedant Rathore +Johan Blåbäck +Pranjal Tale +Jason Tokayer +Raghav Jajodia +Rajat Thakur +Dhruv Bhanushali +Anjul Kumar Tyagi +Barun Parruck +Bao Chau +Tanay Agrawal +Ranjith Kumar +Shikhar Makhija +Yathartha Joshi +Valeriia Gladkova +Sagar Bharadwaj +Daniel Mahler +Ka Yi +Rishat Iskhakov +Szymon Mieszczak +Sachin Agarwal +Priyank Patel +Satya Prakash Dwibedi +tools4origins +Nico Schlömer +Fermi Paradox +Ekansh Purohit +Vedarth Sharma +Peeyush Kushwaha +Jayjayyy +Christopher J. Wright +Jakub Wilk +Mauro Garavello +Chris Tefer +Shikhar Jaiswal +Chiu-Hsiang Hsu +Carlos Cordoba +Fabian Ball +Yerniyaz +Christiano Anderson +Robin Neatherway +Thomas Hunt +Theodore Han +Duc-Minh Phan +Lejla Metohajrova +Samyak Jain +Aditya Rohan +Vincent Delecroix +Michael Sparapany +Harsh Jain +Nathan Goldbaum +latot +Kenneth Lyons +Stan Schymanski +David Daly +Ayush Shridhar +Javed Nissar +Jiri Kuncar +vedantc98 +Rupesh Harode +Rob Zinkov +James Harrop +James Taylor +Ishan Joshi +Marco Mancini +Boris Ettinger +Micah Fitch +Daniel Wennberg +ylemkimon +Akash Vaish +Peter Enenkel +Waldir Pimenta +Jithin D. George +Lev Chelyadinov +Lucas Wiman +Rhea Parekh +James Cotton +Robert Pollak +anca-mc +Sourav Ghosh +Jonathan Allan +Nikhil Pappu +Ethan Ward +Cezary Marczak +dps7ud +Nilabja Bhattacharya +Itay4 <31018228+Itay4@users.noreply.github.com> +Poom Chiarawongse +Yang Yang +Cavendish McKay +Bradley Gannon +B McG +Rob Drynkin +Seth Ebner +Akash Kundu +Mark Jeromin +Roberto Díaz Pérez +Gleb Siroki +Segev Finer +Alex Lubbock +Ayodeji Ige +Matthew Wardrop +Hugo van Kemenade +Austin Palmer +der-blaue-elefant +Filip Gokstorp +Yuki Matsuda +Aaron Miller +Salil Vishnu Kapur +Atharva Khare +Shubham Maheshwari +Pavel Tkachenko +Ashish Kumar Gaurav +Rajeev Singh +Keno Goertz +Lucas Gallindo +Himanshu +David Menéndez Hurtado +Amit Manchanda +Rohit Jain +Jonathan A. Gross +Unknown +Sayan Goswami +Subhash Saurabh +Rastislav Rabatin +Vishal +Jeremey Gluck +Akshat Maheshwari +symbolique +Saloni Jain +Arighna Chakrabarty +Abhigyan Khaund +Jashanpreet Singh +Saurabh Agarwal +luzpaz +P. 
Sai Prasanth +Nirmal Sarswat +Cristian Di Pietrantonio +Ravi charan +Nityananda Gohain +Cédric Travelletti +Nicholas Bollweg +Himanshu Ladia +Adwait Baokar +Mihail Tarigradschi +Saketh +rushyam +sfoo +Rahil Hastu +Zach Raines +Sidhant Nagpal +Gagandeep Singh +Rishav Chakraborty +Malkhan Singh +Joaquim Monserrat +Mayank Singh +Rémy Léone +Maxence Mayrand <35958639+maxencemayrand@users.noreply.github.com> +Nikoleta Glynatsi +helo9 +Ken Wakita +Carl Sandrock +Fredrik Eriksson +Ian Swire +Bulat +Ehren Metcalfe +Dmitry Savransky +Kiyohito Yamazaki +Caley Finn +Zhi-Qiang Zhou +Alexander Pozdneev +Wes Turner <50891+westurner@users.noreply.github.com> +JMSS-Unknown <31131631+JMSS-Unknown@users.noreply.github.com> +Arshdeep Singh +cym1 <16437732+cym1@users.noreply.github.com> +Stewart Wadsworth +Jared Lumpe +Avi Shrivastava +ramvenkat98 +Bilal Ahmed +Dimas Abreu Archanjo Dutra +Yatna Verma +S.Y. Lee +Miro Hrončok +Sudarshan Kamath +Ayushman Koul +Robert Dougherty-Bliss +Andrey Grozin +Bavish Kulur +Arun Singh +sirnicolaf <43586954+sirnicolaf@users.noreply.github.com> +Zachariah Etienne +Prayush Dawda <35144226+iamprayush@users.noreply.github.com> +2torus +Faisal Riyaz +Martin Roelfs +SirJohnFranklin +Anthony Sottile +ViacheslavP +Safiya03 +Alexander Dunlap +Rohit Sharma <31184621+rohitx007@users.noreply.github.com> +Jonathan Warner +Mohit Balwani +Marduk Bolaños +amsuhane +Matthias Geier +klaasvanaarsen <44929042+klaasvanaarsen@users.noreply.github.com> +Shubham Kumar Jha +rationa-kunal +Animesh Sinha +Gaurang Tandon <1gaurangtandon@gmail.com> +Matthew Craven +Daniel Ingram +Jogi Miglani +Takumasa Nakamura +Ritu Raj Singh +Rajiv Ranjan Singh +Vera Lozhkina +adhoc-king <46354827+adhoc-king@users.noreply.github.com> +Mikel Rouco +Oscar Gustafsson +damianos +Supreet Agrawal +shiksha11 +Martin Ueding +sharma-kunal +Divyanshu Thakur +Susumu Ishizuka +Samnan Rahee +Fredrik Andersson +Bhavya Srivastava +Alpesh Jamgade +Shubham Abhang +Vishesh Mangla +Nicko van Someren +dandiez <47832466+dandiez@users.noreply.github.com> +Frédéric Chapoton +jhanwar +Noumbissi valere Gille Geovan +Salmista-94 +Shivani Kohli +Parker Berry +Pragyan Mehrotra +Nabanita Dash +Gaetano Guerriero +Ankit Raj Pandey +Ritesh Kumar +kangzhiq <709563092@qq.com> +Jun Lin +Petr Kungurtsev +Anway De +znxftw +Denis Ivanenko +Orestis Vaggelis +Nikhil Maan +Abhinav Anand +Qingsha Shi +Juan Barbosa +Prionti Nasir +Bharat Raghunathan +arooshiverma +Christoph Gohle +Charalampos Tsiagkalis +Daniel Sears +Megan Ly +Sean P. Cornelius +Erik R. 
Gomez +Riccardo Magliocchetti +Henry Metlov +pekochun +Bendik Samseth +Vighnesh Shenoy +Versus Void +Denys Rybalka +Mark Dickinson +Rimi +rimibis <33387803+rimibis@users.noreply.github.com> +Steven Lee +Gilles Schintgen +Abhi58 +Tomasz Pytel +Aadit Kamat +Samesh +Velibor Zeli +Gabriel Bernardino +Joseph Redfern +Cameron King +Miguel Marco +David Hagen +Hannah Kari +Soniya Nayak +Harsh Agarwal +Enric Florit +Yogesh Mishra +Denis Rykov +Ivan Tkachenko +Kenneth Emeka Odoh +Stephan Seitz +Yeshwanth N +Oscar Gerardo Lazo Arjona +Srinivasa Arun Yeragudipati +Kirtan Mali +TitanSnow +Pengning Chao <8857165+PengningChao@users.noreply.github.com> +Louis Abraham +Morten Olsen Lysgaard +Akash Nagaraj (akasnaga) +Akash Nagaraj +Lauren Glattly +Hou-Rui +George Korepanov +dranknight09 +aditisingh2362 +Gina +gregmedlock +Georgios Giapitzakis Tzintanos +Eric Wieser +Bradley Dowling <34559056+btdow@users.noreply.github.com> +Maria Marginean <33810762+mmargin@users.noreply.github.com> +Akash Agrawall +jgulian +Sourav Goyal +Zlatan Vasović +Alex Meiburg +Smit Lunagariya +Naman Gera +Julien Palard +Dhruv Mendiratta +erdOne <36414270+erdOne@users.noreply.github.com> +risubaba +abhinav28071999 <41710346+abhinav28071999@users.noreply.github.com> +Jisoo Song +Jaime R <38530589+Jaime02@users.noreply.github.com> +Vikrant Malik +Hardik Saini <43683678+Guardianofgotham@users.noreply.github.com> +Abhishek +Johannes Hartung +Milan Jolly +faizan2700 +mohit <39158356+mohitacecode@users.noreply.github.com> +Mohit Gupta +Psycho-Pirate +Chanakya-Ekbote +Rashmi Shehana +Jonty16117 +Anubhav Gupta +Michal Grňo +vezeli <37907135+vezeli@users.noreply.github.com> +Tim Gates +Sandeep Murthy +Neil +V1krant <46847915+V1krant@users.noreply.github.com> +alejandro +Riyan Dhiman +sbt4104 +Seth Troisi +Bhaskar Gupta +Smit Gajjar +rbl +Ilya Pchelintsev +Omar Wagih +prshnt19 +Johan Guzman +Vasileios Kalos +BasileiosKal <61801875+BasileiosKal@users.noreply.github.com> +Shubham Thorat <37049710+sbt4104@users.noreply.github.com> +Arpan Chattopadhyay +Ashutosh Hathidara +Moses Paul R +Saanidhya vats +tnzl +Vatsal Srivastava +Jean-Luc Herren +Dhruv Kothari +seadavis <45022599+seadavis@users.noreply.github.com> +kamimura +slacker404 +Jaime Resano +Ebrahim Byagowi +wuyudi +Akira Kyle +Calvin Jay Ross +Martin Thoma +Thomas A Caswell +Lagaras Stelios +Jerry James +Jan Kruse +Nathan Taylor +Vaishnav Damani +Mohit Shah +Mathias Louboutin +Marijan Smetko +Dave Witte Morris +soumi7 +Zhongshi +Wes Galbraith +KaustubhDamania +w495 +Akhil Rajput +Markus Mohrhard +Benjamin Wolba +彭于斌 <1931127624@qq.com> +Rudr Tiwari +Aaryan Dewan +Benedikt Placke +Sneha Goddu +goddus <39923708+goddus@users.noreply.github.com> +Shivang Dubey +Michael Greminger +Peter Cock +Willem Melching +Elias Basler +Brandon David +Abhay_Dhiman +Tasha Kim +Ayush Malik +Devesh Sawant +Wolfgang Stöcher +Sudeep Sidhu +foice +Ben Payne +Muskan Kumar <31043527+muskanvk@users.noreply.github.com> +noam simcha finkelstein +Garrett Folbe +Islam Mansour +Sayandip Halder +Shubham Agrawal +numbermaniac <5206120+numbermaniac@users.noreply.github.com> +Sakirul Alam +Mohammed Bilal +Chris du Plessis +Coder-RG +Ansh Mishra +Alex Malins +Lorenzo Contento +Naveen Sai +Shital Mule +Amanda Dsouza +Nijso Beishuizen +Harry Zheng +Felix Yan +Constantin Mateescu +Eva Tiwari +Aditya Kumar Sinha +Soumi Bardhan <51290447+Soumi7@users.noreply.github.com> +Kaustubh Chaudhari +Kristian Brünn +Neel Gorasiya +Akshat Sood <68052998+akshatsood2249@users.noreply.github.com> +Jose M. 
Gomez +Stefan Petrea +Praveen Sahu +Mark Bell +AlexCQY +Fabian Froehlich +Nikhil Gopalam +Kartik Sethi +Muhammed Abdul Quadir Owais +Harshit Yadav +Sidharth Mundhra +Suryam Arnav Kalra +Prince Gupta +Kunal Singh +Mayank Raj +Achal Jain <2achaljain@gmail.com> +Mario Maio +Aaron Stiff <69512633+AaronStiff@users.noreply.github.com> +Wyatt Peak +Bhaskar Joshi +Aditya Jindal +Vaibhav Bhat +Priyansh Rathi +Saket Kumar Singh +Yukai Chou +Qijia Liu +Paul Mandel +Nisarg Chaudhari <54911392+Nisarg-Chaudhari@users.noreply.github.com> +Dominik Stańczak +Rodrigo Luger +Marco Antônio Habitzreuter +Ayush Bisht +Akshansh Bhatt +Brandon T. Willard +Thomas Aarholt +Hiren Chalodiya +Roland Dixon +dimasvq +Sagar231 +Michael Chu +Abby Ng +Angad Sandhu <55819847+angadsinghsandhu@users.noreply.github.com> +Alexander Cockburn +Yaser AlOsh +Davide Sandonà +Jonathan Gutow +Nihir Agarwal +Lee Johnston +Zach Carmichael <20629897+craymichael@users.noreply.github.com> +Vijairam Ganesh Moorthy +Hanspeter Schmid +Ben Oostendorp +Nikita +Aman +Shashank KS +Aman Sharma +Anup Parikh +Lucy Mountain +Miguel Torres Costa +Rikard Nordgren +Arun sanganal <74652697+ArunSanganal@users.noreply.github.com> +Kamlesh Joshi <72374645+kamleshjoshi8102@users.noreply.github.com> +Joseph Rance <56409230+Joseph-Rance@users.noreply.github.com> +Huangduirong +Nils Schulte <47043622+Schnilz@users.noreply.github.com> +Matt Bogosian +Elisha Hollander +Aditya Ravuri +Mamidi Ratna Praneeth +Jeffrey Ryan +Jonathan Daniel <36337649+jond01@users.noreply.github.com> +Robin Richard +Gautam Menghani +Remco de Boer <29308176+redeboer@users.noreply.github.com> +Sebastian East +Evani Balasubramanyam +Rahil Parikh +Jason Ross +Joannah Nanjekye +Ayush Kumar +Kshitij +Daniel Hyams +alijosephine +Matthias Köppe +mohajain +Anibal M. Medina-Mardones +Travis Ens +Evgenia Karunus +Risiraj Dey +lastcodestanding +Andrey Lekar +Abbas Mohammed <42001049+iam-abbas@users.noreply.github.com> +anutosh491 +Steve Kieffer +Paul Spiering +Pieter Gijsbers +Wang Ran (汪然) +naelsondouglas +Aman Thakur +S. Hanko +Dennis Sweeney +Gurpartap Singh +Hampus Malmberg +scimax +Nikhil Date +Kuldeep Borkar Jr +AkuBrain <76952313+Franck2111@users.noreply.github.com> +Leo Battle +Advait Pote +Anurag Bhat +Jeremy Monat +Diane Tchuindjo +Tom Fryers <61272761+TomFryers@users.noreply.github.com> +Zouhair +zzj <29055749+zjzh@users.noreply.github.com> +shubhayu09 +Siddhant Jain +Tirthankar Mazumder <63574588+wermos@users.noreply.github.com> +Sumit Kumar +Shivam Sagar +Gaurav Jain +Andrii Oriekhov +Luis Talavera +Arie Bovenberg +Carson McManus +Jack Schmidt <1107865+jackschmidt@users.noreply.github.com> +Riley Britten +Georges Khaznadar +Donald Wilson +Timo Stienstra +dispasha +Saksham Alok +Varenyam Bhardwaj +oittaa <8972248+oittaa@users.noreply.github.com> +Omkaar <79257339+Pysics@users.noreply.github.com> +Islem BOUZENIA +extraymond +Alexander Behrens +user202729 <25191436+user202729@users.noreply.github.com> +Pieter Eendebak +Zaz Brown +ritikBhandari +viocha <66580331+viocha@users.noreply.github.com> +Arthur Ryman +Xiang Wu +tttc3 +Seth Poulsen +cocolato +Anton Golovanov +Gareth Ma +Clément M.T. 
Robert +Glenn Horton-Smith +Karan +Stefan Behnle <84378403+behnle@users.noreply.github.com> +Shreyash Mishra <72146041+Shreyash-cyber@users.noreply.github.com> +Arthur Milchior +NotWearingPants <26556598+NotWearingPants@users.noreply.github.com> +Ishan Pandhare +Carlos García Montoro +Parcly Taxel +Saicharan +Kunal Sheth +Biswadeep Purkayastha <98874428+metabiswadeep@users.noreply.github.com> +Jyn Spring 琴春 +Phil LeMaitre +Chris Kerr +José Senart +Uwe L. Korn +ForeverHaibara <69423537+ForeverHaibara@users.noreply.github.com> +Yves Tumushimire +wookie184 +Costor +Klaus Rettinghaus +Sam Brockie +Abhishek Patidar <1e9abhi1e10@gmail.com> +Eric Demer +Pontus von Brömssen +Victor Immanuel +Evandro Bernardes +Michele Ceccacci +Ayush Aryan +Kishore Gopalakrishnan +Jan-Philipp Hoffmann +haru-44 <36563693+haru-44@users.noreply.github.com> +Sayan Mitra +Aman Kumar Shukla +Zoufiné Lauer-Baré +Charles Harris +Tejaswini Sanapathi +Devansh +Aaron Gokaslan diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/LICENSE b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0744f229d697ca3ed1b1b257bfdb70e3eecf0b9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/LICENSE @@ -0,0 +1,153 @@ +Copyright (c) 2006-2023 SymPy Development Team + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of SymPy nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +-------------------------------------------------------------------------------- + +Patches that were taken from the Diofant project (https://github.com/diofant/diofant) +are licensed as: + +Copyright (c) 2006-2018 SymPy Development Team, + 2013-2023 Sergey B Kirpichev + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of Diofant or SymPy nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +-------------------------------------------------------------------------------- + +Submodules taken from the multipledispatch project (https://github.com/mrocklin/multipledispatch) +are licensed as: + +Copyright (c) 2014 Matthew Rocklin + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of multipledispatch nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +-------------------------------------------------------------------------------- + +The files under the directory sympy/parsing/autolev/tests/pydy-example-repo +are directly copied from PyDy project and are licensed as: + +Copyright (c) 2009-2023, PyDy Authors +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of this project nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL PYDY AUTHORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files under the directory sympy/parsing/latex +are directly copied from latex2sympy project and are licensed as: + +Copyright 2016, latex2sympy + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
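A brief aside on the cuRAND uniform-distribution header (include guard `CURAND_UNIFORM_H_`) earlier in this diff: its doc comments promise exactly one `curand()` call per draw, to preserve the quasirandom properties of the Sobol sequences, and an output range that excludes 0.0 but includes 1.0. For readers without a GPU at hand, a rough host-side analogue using SciPy's quasirandom engine can make those conventions concrete. This is SciPy, not cuRAND, and SciPy's convention is the mirror image, [0, 1); the `dims`/`draws` values below are illustrative.

``` python
# Host-side analogue of the Sobol draws documented in the cuRAND
# header above, using SciPy's quasirandom engine (NOT cuRAND).
# Assumes scipy >= 1.7; `dims` and `draws` are illustrative values.
import numpy as np
from scipy.stats import qmc

dims, draws = 2, 8  # draws is a power of 2, as Sobol balance prefers
sampler = qmc.Sobol(d=dims, scramble=True, seed=7)
sample = sampler.random(draws)  # shape (draws, dims)

# Convention check: SciPy yields values in [0, 1), whereas the cuRAND
# functions above document a range that excludes 0.0 and includes 1.0.
assert np.all(sample >= 0.0) and np.all(sample < 1.0)
print(sample)
```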
diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/METADATA b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..6b495e976e13ab24d46fef5e950e03c81d79e5f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/METADATA @@ -0,0 +1,307 @@ +Metadata-Version: 2.1 +Name: sympy +Version: 1.12 +Summary: Computer algebra system (CAS) in Python +Home-page: https://sympy.org +Author: SymPy development team +Author-email: sympy@googlegroups.com +License: BSD +Project-URL: Source, https://github.com/sympy/sympy +Keywords: Math CAS +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: mpmath (>=0.19) + +# SymPy + +[![pypi version](https://img.shields.io/pypi/v/sympy.svg)](https://pypi.python.org/pypi/sympy) +[![Join the chat at https://gitter.im/sympy/sympy](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sympy/sympy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Zenodo Badge](https://zenodo.org/badge/18918/sympy/sympy.svg)](https://zenodo.org/badge/latestdoi/18918/sympy/sympy) +[![Downloads](https://pepy.tech/badge/sympy/month)](https://pepy.tech/project/sympy) +[![GitHub Issues](https://img.shields.io/badge/issue_tracking-github-blue.svg)](https://github.com/sympy/sympy/issues) +[![Git Tutorial](https://img.shields.io/badge/PR-Welcome-%23FF8300.svg?)](https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project) +[![Powered by NumFocus](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org) +[![Commits since last release](https://img.shields.io/github/commits-since/sympy/sympy/latest.svg?longCache=true&style=flat-square&logo=git&logoColor=fff)](https://github.com/sympy/sympy/releases) + +[![SymPy Banner](https://github.com/sympy/sympy/raw/master/banner.svg)](https://sympy.org/) + + +See the [AUTHORS](AUTHORS) file for the list of authors. + +And many more people helped on the SymPy mailing list, reported bugs, +helped organize SymPy's participation in the Google Summer of Code, the +Google Highly Open Participation Contest, Google Code-In, wrote and +blogged about SymPy... + +License: New BSD License (see the [LICENSE](LICENSE) file for details) covers all +files in the sympy repository unless stated otherwise. + +Our mailing list is at +<https://groups.google.com/forum/#!forum/sympy>. + +We have a community chat at [Gitter](https://gitter.im/sympy/sympy). Feel +free to ask us anything there. We have a very welcoming and helpful +community.
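The `Requires-Python: >=3.8` and `Requires-Dist: mpmath (>=0.19)` fields in the METADATA file above are machine-readable. A minimal sketch of reading them back at runtime with only the standard library follows; it assumes the sympy 1.12 wheel recorded in this diff is actually installed:

``` python
# Read the dist-info METADATA shown above at runtime.
# Assumes the sympy 1.12 wheel from this diff is installed.
from importlib.metadata import metadata, requires

md = metadata("sympy")
print(md["Name"], md["Version"])  # sympy 1.12
print(md["Requires-Python"])      # >=3.8
print(requires("sympy"))          # ['mpmath (>=0.19)']
```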
+ +## Download + +The recommended installation method is through Anaconda, +<https://www.anaconda.com/download/> + +You can also get the latest version of SymPy from +<https://pypi.python.org/pypi/sympy> + +To get the git version do + + $ git clone https://github.com/sympy/sympy.git + +For other options (tarballs, debs, etc.), see +<https://docs.sympy.org/dev/install.html>. + +## Documentation and Usage + +For in-depth instructions on installation and building the +documentation, see the [SymPy Documentation Style Guide](https://docs.sympy.org/dev/documentation-style-guide.html). + +Everything is at: + +<https://docs.sympy.org/> + +You can generate everything at the above site in your local copy of +SymPy by: + + $ cd doc + $ make html + +Then the docs will be in \_build/html. If +you don't want to read that, here is a short usage: + +From this directory, start Python and: + +``` python +>>> from sympy import Symbol, cos +>>> x = Symbol('x') +>>> e = 1/cos(x) +>>> print(e.series(x, 0, 10)) +1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + 277*x**8/8064 + O(x**10) +``` + +SymPy also comes with a console that is a simple wrapper around the +classic python console (or IPython when available) that loads the SymPy +namespace and executes some common commands for you. + +To start it, issue: + + $ bin/isympy + +from this directory, if SymPy is not installed or simply: + + $ isympy + +if SymPy is installed. + +## Installation + +SymPy has a hard dependency on the [mpmath](http://mpmath.org/) library +(version \>= 0.19). You should install it first; please refer to the +mpmath installation guide: + +<https://mpmath.org/doc/current/setup.html#download-and-installation> + +To install SymPy using PyPI, run the following command: + + $ pip install sympy + +To install SymPy using Anaconda, run the following command: + + $ conda install -c anaconda sympy + +To install SymPy from GitHub source, first clone SymPy using `git`: + + $ git clone https://github.com/sympy/sympy.git + +Then, in the `sympy` repository that you cloned, simply run: + + $ pip install . + +See <https://docs.sympy.org/dev/install.html> for more information. + +## Contributing + +We welcome contributions from anyone, even if you are new to open +source. Please read our [Introduction to Contributing](https://github.com/sympy/sympy/wiki/Introduction-to-contributing) +page and the [SymPy Documentation Style Guide](https://docs.sympy.org/dev/documentation-style-guide.html). If you +are new and looking for some way to contribute, a good place to start is +to look at the issues tagged [Easy to Fix](https://github.com/sympy/sympy/issues?q=is%3Aopen+is%3Aissue+label%3A%22Easy+to+Fix%22). + +Please note that all participants in this project are expected to follow +our Code of Conduct. By participating in this project you agree to abide +by its terms. See [CODE\_OF\_CONDUCT.md](CODE_OF_CONDUCT.md). + +## Tests + +To execute all tests, run: + + $ ./setup.py test + +in the current directory. + +For the more fine-grained running of tests or doctests, use `bin/test` +or `bin/doctest`, respectively. The master branch is automatically tested +by GitHub Actions. + +To test pull requests, use +[sympy-bot](https://github.com/sympy/sympy-bot). + +## Regenerate Experimental LaTeX Parser/Lexer + +The parser and lexer were generated with the [ANTLR4](http://antlr4.org) +toolchain in `sympy/parsing/latex/_antlr` and checked into the repo. +Presently, most users should not need to regenerate these files, but +if you plan to work on this feature, you will need the `antlr4` +command-line tool (and you must ensure that it is in your `PATH`). +One way to get it is: + + $ conda install -c conda-forge antlr=4.11.1 + +Alternatively, follow the instructions on the ANTLR website and download +the `antlr-4.11.1-complete.jar`.
Then export the `CLASSPATH` as instructed +and instead of creating `antlr4` as an alias, make it an executable file +with the following contents: +``` bash +#!/bin/bash +java -jar /usr/local/lib/antlr-4.11.1-complete.jar "$@" +``` + +After making changes to `sympy/parsing/latex/LaTeX.g4`, run: + + $ ./setup.py antlr + +## Clean + +To clean everything (thus getting the same tree as in the repository): + + $ git clean -Xdf + +which will clear everything ignored by `.gitignore`, and: + + $ git clean -df + +to clear all untracked files. You can revert the most recent changes in +git with: + + $ git reset --hard + +WARNING: The above commands will all clear changes you may have made, +and you will lose them forever. Be sure to check things with `git +status`, `git diff`, `git clean -Xn`, and `git clean -n` before doing any +of those. + +## Bugs + +Our issue tracker is at <https://github.com/sympy/sympy/issues>. Please +report any bugs that you find. Or, even better, fork the repository on +GitHub and create a pull request. We welcome all changes, big or small, +and we will help you make the pull request if you are new to git (just +ask on our mailing list or Gitter Channel). If you have any further queries, you can find answers +on Stack Overflow using the [sympy](https://stackoverflow.com/questions/tagged/sympy) tag. + +## Brief History + +SymPy was started by Ondřej Čertík in 2005; he wrote some code during +the summer, then some more during summer 2006. In February +2007, Fabian Pedregosa joined the project and helped fix many things, +contributed documentation, and brought it back to life. Five students (Mateusz +Paprocki, Brian Jorgensen, Jason Gedge, Robert Schwarz, and Chris Wu) +improved SymPy incredibly during summer 2007 as part of the Google +Summer of Code. Pearu Peterson joined the development during summer +2007 and made SymPy much more competitive by rewriting the core +from scratch, which made it from 10x to 100x faster. Jurjen N.E. Bos +has contributed pretty-printing and other patches. Fredrik Johansson has +written mpmath and contributed a lot of patches. + +SymPy has participated in every Google Summer of Code since 2007. You +can see <https://github.com/sympy/sympy/wiki> for +full details. Each year has improved SymPy by leaps and bounds. Most of SymPy's +development has come from Google Summer of Code students. + +In 2011, Ondřej Čertík stepped down as lead developer, with Aaron +Meurer, who also started as a Google Summer of Code student, taking his +place. Ondřej Čertík is still active in the community but is too busy +with work and family to play a lead development role. + +Since then, a lot more people have joined the development and some +people have also left. You can see the full list in doc/src/aboutus.rst, +or online at: + +<https://docs.sympy.org/dev/aboutus.html> + +The git history goes back to 2007 when development moved from svn to hg. +To see the history before that point, look at +<https://github.com/sympy/sympy-old>. + +You can use git to see the biggest developers. The command: + + $ git shortlog -ns + +will show each developer, sorted by commits to the project. The command: + + $ git shortlog -ns --since="1 year" + +will show the top developers from the last year. + +## Citation + +To cite SymPy in publications use + +> Meurer A, Smith CP, Paprocki M, Čertík O, Kirpichev SB, Rocklin M, +> Kumar A, Ivanov S, Moore JK, Singh S, Rathnayake T, Vig S, Granger BE, +> Muller RP, Bonazzi F, Gupta H, Vats S, Johansson F, Pedregosa F, Curry +> MJ, Terrel AR, Roučka Š, Saboo A, Fernando I, Kulal S, Cimrman R, +> Scopatz A. (2017) SymPy: symbolic computing in Python.
*PeerJ Computer +> Science* 3:e103 + +A BibTeX entry for LaTeX users is + +``` bibtex +@article{10.7717/peerj-cs.103, + title = {SymPy: symbolic computing in Python}, + author = {Meurer, Aaron and Smith, Christopher P. and Paprocki, Mateusz and \v{C}ert\'{i}k, Ond\v{r}ej and Kirpichev, Sergey B. and Rocklin, Matthew and Kumar, Amit and Ivanov, Sergiu and Moore, Jason K. and Singh, Sartaj and Rathnayake, Thilina and Vig, Sean and Granger, Brian E. and Muller, Richard P. and Bonazzi, Francesco and Gupta, Harsh and Vats, Shivam and Johansson, Fredrik and Pedregosa, Fabian and Curry, Matthew J. and Terrel, Andy R. and Rou\v{c}ka, \v{S}t\v{e}p\'{a}n and Saboo, Ashutosh and Fernando, Isuru and Kulal, Sumith and Cimrman, Robert and Scopatz, Anthony}, + year = 2017, + month = Jan, + keywords = {Python, Computer algebra system, Symbolics}, + abstract = { + SymPy is an open-source computer algebra system written in pure Python. It is built with a focus on extensibility and ease of use, through both interactive and programmatic applications. These characteristics have led SymPy to become a popular symbolic library for the scientific Python ecosystem. This paper presents the architecture of SymPy, a description of its features, and a discussion of select submodules. The supplementary material provides additional examples and further outlines details of the architecture and features of SymPy. + }, + volume = 3, + pages = {e103}, + journal = {PeerJ Computer Science}, + issn = {2376-5992}, + url = {https://doi.org/10.7717/peerj-cs.103}, + doi = {10.7717/peerj-cs.103} +} +``` + +SymPy is BSD licensed, so you are free to use it however you like, be +it academic, commercial, creating forks or derivatives, as long as you +copy the BSD statement if you redistribute it (see the LICENSE file for +details). That said, although not required by the SymPy license, if it +is convenient for you, please cite SymPy when using it in your work and +also consider contributing all your changes back, so that we can +incorporate them and all of us will benefit in the end.
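As a quick sanity check that an installation following the steps above actually worked, here is a minimal sketch (assuming SymPy 1.12 and its hard mpmath dependency were installed via one of the commands in the Installation section):

``` python
# Post-install sanity check for the package described above.
# Assumes SymPy 1.12 (and mpmath >= 0.19) is installed.
import mpmath
import sympy
from sympy import Symbol, integrate, sin

print(sympy.__version__)   # expected: 1.12
print(mpmath.__version__)  # must satisfy the >= 0.19 requirement

x = Symbol('x')
print(integrate(x * sin(x), x))  # -> -x*cos(x) + sin(x)
```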
diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..1e7bef64eda8c3a887ab68025effba698194066c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD @@ -0,0 +1,2932 @@ +../../../bin/isympy,sha256=d2bVl7ayZb1x2IeBT0eEd7kWBJ39tgMY4YabhXnKCPQ,234 +../../../share/man/man1/isympy.1,sha256=9DZdSOIQLikrATHlbkdDZ04LBQigZDUE0_oCXBDvdBs,6659 +__pycache__/isympy.cpython-310.pyc,, +isympy.py,sha256=gAoHa7OM0y9G5IBO7wO-uTpD-CPnd6sbmjJ_GGB0yzg,11207 +sympy-1.12.dist-info/AUTHORS,sha256=wlSBGC-YWljenH44cUwI510RfR4iTZamMi_aKjJwpUU,48572 +sympy-1.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +sympy-1.12.dist-info/LICENSE,sha256=B6XpgZ9ye0mGrSgpx6KaYyDUJXX3IOsk1xt_71c6AoY,7885 +sympy-1.12.dist-info/METADATA,sha256=PsPCJVJrEv6F-QpnHbsxepSvVwxvt2rx2RmuTXXrJqY,12577 +sympy-1.12.dist-info/RECORD,, +sympy-1.12.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +sympy-1.12.dist-info/entry_points.txt,sha256=Sp-vLJom4PRlhGfY6RpUre7SjYm33JNq9NCwCGeW-fQ,39 +sympy-1.12.dist-info/top_level.txt,sha256=elXb5xfjLdjgSSoQFk4_2Qu3lp2CIaglF9MQtfIoH7o,13 +sympy/__init__.py,sha256=85o5Yfq2EeAiES9e85A0ZD6n9GvrpanvEdUeu-V5e2w,29005 +sympy/__pycache__/__init__.cpython-310.pyc,, +sympy/__pycache__/abc.cpython-310.pyc,, +sympy/__pycache__/conftest.cpython-310.pyc,, +sympy/__pycache__/galgebra.cpython-310.pyc,, +sympy/__pycache__/release.cpython-310.pyc,, +sympy/__pycache__/this.cpython-310.pyc,, +sympy/abc.py,sha256=P1iQKfXl7Iut6Z5Y97QmGr_UqiAZ6qR-eoRMtYacGfA,3748 +sympy/algebras/__init__.py,sha256=7PRGOW30nlMOTeUPR7iy8l5xGoE2yCBEfRbjqDKWOgU,62 +sympy/algebras/__pycache__/__init__.cpython-310.pyc,, +sympy/algebras/__pycache__/quaternion.cpython-310.pyc,, +sympy/algebras/quaternion.py,sha256=RjAU_1jKNq7LQl4Iuf0BhQ2NtbbCOL3Ytyr_PPjxxlQ,47563 +sympy/algebras/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/algebras/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/algebras/tests/__pycache__/test_quaternion.cpython-310.pyc,, +sympy/algebras/tests/test_quaternion.py,sha256=WTnJxcMkapyNR4QYJFisbwc2kStw2ZYQuEV3hNalhYE,15921 +sympy/assumptions/__init__.py,sha256=PFS8djTqiNbGVMjg7PaPjEfwmjyZVfioXiRVzqqA3E0,550 +sympy/assumptions/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/__pycache__/ask.cpython-310.pyc,, +sympy/assumptions/__pycache__/ask_generated.cpython-310.pyc,, +sympy/assumptions/__pycache__/assume.cpython-310.pyc,, +sympy/assumptions/__pycache__/cnf.cpython-310.pyc,, +sympy/assumptions/__pycache__/facts.cpython-310.pyc,, +sympy/assumptions/__pycache__/refine.cpython-310.pyc,, +sympy/assumptions/__pycache__/satask.cpython-310.pyc,, +sympy/assumptions/__pycache__/sathandlers.cpython-310.pyc,, +sympy/assumptions/__pycache__/wrapper.cpython-310.pyc,, +sympy/assumptions/ask.py,sha256=MQZg3JiVEvaZuzMlOUeXjPLuAQlhb5-QNDU8Mw5mNnI,18800 +sympy/assumptions/ask_generated.py,sha256=DSsSGSwjV0K3ASMvWvatFEXviYKXR-1xPwySPsLL-c4,17083 +sympy/assumptions/assume.py,sha256=_gcFc4h_YGs9-tshoD0gmLl_RtPivDQWMWhWWLX9seo,14606 +sympy/assumptions/cnf.py,sha256=axPy2EMLHkIX83_kcsKoRFlpq3x_0YxOEjzt7FHgxc4,12706 +sympy/assumptions/facts.py,sha256=q0SDVbzmU46_8mf63Uao5pYE4MgyrhR9vn94QJqQSv8,7609 +sympy/assumptions/handlers/__init__.py,sha256=lvjAfPdz0MDjTxjuzbBSGBco2OmpZRiGixSG0oaiZi0,330 +sympy/assumptions/handlers/__pycache__/__init__.cpython-310.pyc,, 
+sympy/assumptions/handlers/__pycache__/calculus.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/common.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/matrices.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/ntheory.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/order.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/sets.cpython-310.pyc,, +sympy/assumptions/handlers/calculus.py,sha256=ul36wLjxrU_LUxEWX63dWklWHgHWw5xVT0d7BkZCdFE,7198 +sympy/assumptions/handlers/common.py,sha256=sW_viw2xdO9Klqf31x3YlYcGlhgRj52HV1JFmwrgtb4,4064 +sympy/assumptions/handlers/matrices.py,sha256=Gdauk2xk1hKPRr4i6RpvOMHtDnyVD34x1OyhL-Oh8Hc,22321 +sympy/assumptions/handlers/ntheory.py,sha256=2i-EhgO9q1LfDLzN3BZVzHNfaXSsce131XtBr5TEh2I,7213 +sympy/assumptions/handlers/order.py,sha256=Y6Txiykbj4gkibX0mrcUUlhtRWE27p-4lpG4WACX3Ik,12222 +sympy/assumptions/handlers/sets.py,sha256=2Jh2G6Ce1qz9Imzv5et_v-sMxY62j3rFdnp1UZ_PGB8,23818 +sympy/assumptions/predicates/__init__.py,sha256=q1C7iWpvdDymEUZNyzJvZLsLtgwSkYtCixME-fYyIDw,110 +sympy/assumptions/predicates/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/calculus.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/common.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/matrices.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/ntheory.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/order.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/sets.cpython-310.pyc,, +sympy/assumptions/predicates/calculus.py,sha256=vFnlYVYZVd6D9OwA7-3bDK_Q0jf2iCZCZiMlWenw0Vg,1889 +sympy/assumptions/predicates/common.py,sha256=zpByACpa_tF0nVNB0J_rJehnXkHtkxhchn1DvkVVS-s,2279 +sympy/assumptions/predicates/matrices.py,sha256=X3vbkEf3zwJLyanEjf6ijYXuRfFfSv-yatl1tJ25wDk,12142 +sympy/assumptions/predicates/ntheory.py,sha256=wvFNFSf0S4egbY7REw0V0ANC03CuiRU9PLmdi16VfHo,2546 +sympy/assumptions/predicates/order.py,sha256=ZI4u_WfusMPAEsMFawkSN9QvaMwI3-Jt3-U_xIcGl_8,9508 +sympy/assumptions/predicates/sets.py,sha256=anp-DeJaU2nun3K4O71G_fbqpETozSKynRGuLhiO8xI,8937 +sympy/assumptions/refine.py,sha256=GlC16HC3VNtCHFZNul1tnDCNPy-iOPKZBGjpTbTlbh4,11950 +sympy/assumptions/relation/__init__.py,sha256=t2tZNEIK7w-xXshRQIRL8tIyiNe1W5fMhN7QNRPnQFo,261 +sympy/assumptions/relation/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/relation/__pycache__/binrel.cpython-310.pyc,, +sympy/assumptions/relation/__pycache__/equality.cpython-310.pyc,, +sympy/assumptions/relation/binrel.py,sha256=3iwnSEE53-vRsPv-bOnjydgOkCpbB12FTFR_sQ3CwvE,6313 +sympy/assumptions/relation/equality.py,sha256=RbwztgBBVlnfc9-M-IYKonybITSr8WdqWQqwlp2j3V8,7160 +sympy/assumptions/satask.py,sha256=ld_ZWQlxh9R3ElMUBjnqVfwEJ2irPYtJ6vV5mWdzSs0,11280 +sympy/assumptions/sathandlers.py,sha256=Uu_ur8XtxUH5uaAlfGQHEyx2S1-3Q00EFmezDYaGxT0,9428 +sympy/assumptions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/assumptions/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_assumptions_2.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_context.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_matrices.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_query.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_refine.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_satask.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_sathandlers.cpython-310.pyc,, 
+sympy/assumptions/tests/__pycache__/test_wrapper.cpython-310.pyc,, +sympy/assumptions/tests/test_assumptions_2.py,sha256=oNgIDOoW-GpBbXxbtw05SWnE8I7sGislYmB3MDogwB4,1070 +sympy/assumptions/tests/test_context.py,sha256=I5gES7AY9_vz1-CEaCchy4MXABtX85ncNkvoRuLskG8,1153 +sympy/assumptions/tests/test_matrices.py,sha256=nzSofuawc18hNe9Nj0dN_lTeDwa2KbPjt4K2rvb3xmw,12258 +sympy/assumptions/tests/test_query.py,sha256=teHsXTfPw_q4197tXcz2Ov-scVxDHP-T_LpcELmOMnI,97999 +sympy/assumptions/tests/test_refine.py,sha256=bHxYUnCOEIzA1yPU3B2xbU9JZfhDv6RkmPm8esetisQ,8834 +sympy/assumptions/tests/test_satask.py,sha256=IIqqIxzkLfANpTNBKEsCGCp3Bm8zmDnYd23woqKh9EE,15741 +sympy/assumptions/tests/test_sathandlers.py,sha256=jMCZQb3G6pVQ5MHaSTWV_0eULHaCF8Mowu12Ll72rgs,1842 +sympy/assumptions/tests/test_wrapper.py,sha256=iE32j83rrerCz85HHt2hTolgJkqb44KddfEpI3H1Fb8,1159 +sympy/assumptions/wrapper.py,sha256=nZ3StKi-Q0q_HmdwpzZEcE7WQFcVtnB28QBvYe_O220,5514 +sympy/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/benchmarks/__pycache__/bench_discrete_log.cpython-310.pyc,, +sympy/benchmarks/__pycache__/bench_meijerint.cpython-310.pyc,, +sympy/benchmarks/__pycache__/bench_symbench.cpython-310.pyc,, +sympy/benchmarks/bench_discrete_log.py,sha256=CNchIJ5HFMPpNlVZh2vOU0GgQ3bse6hqyqDovpDHlKE,2473 +sympy/benchmarks/bench_meijerint.py,sha256=dSNdZhoc8a4h50wRtbOxLwpmgUiuMFpe6ytTLURcplY,11610 +sympy/benchmarks/bench_symbench.py,sha256=UMD3eYf_Poht0qxjdH2_axGwwON6cZo1Sp700Ci1M1M,2997 +sympy/calculus/__init__.py,sha256=IWDc6qPbEcWyTm9QM6V8vSAs-5OtGNijimykoWz3Clc,828 +sympy/calculus/__pycache__/__init__.cpython-310.pyc,, +sympy/calculus/__pycache__/accumulationbounds.cpython-310.pyc,, +sympy/calculus/__pycache__/euler.cpython-310.pyc,, +sympy/calculus/__pycache__/finite_diff.cpython-310.pyc,, +sympy/calculus/__pycache__/singularities.cpython-310.pyc,, +sympy/calculus/__pycache__/util.cpython-310.pyc,, +sympy/calculus/accumulationbounds.py,sha256=DpFXDYbjSxx0icrx1HagArBeyVx5aSAX83vYuXSGMRI,28692 +sympy/calculus/euler.py,sha256=0QrHD9TYKlSZuO8drnU3bUFJrSu8v5SncqtkRSWLjGM,3436 +sympy/calculus/finite_diff.py,sha256=X7qZJ5GmHlHKokUUMFoaQqrqX2jLRq4b7W2G5aWntzM,17053 +sympy/calculus/singularities.py,sha256=ctVHpnE4Z7iE6tNAssMWmdXu9qWXOXzVJasLxC-cToQ,11757 +sympy/calculus/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/calculus/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_accumulationbounds.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_euler.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_finite_diff.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_singularities.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/calculus/tests/test_accumulationbounds.py,sha256=a_Ry2nKX5WbhSe1Bk2k0W6-VWOpVTg0FnA9u8rNSIV4,11195 +sympy/calculus/tests/test_euler.py,sha256=YWpts4pWSiYEwRsi5DLQ16JgC9109-9NKZIL_IO6_Aw,2683 +sympy/calculus/tests/test_finite_diff.py,sha256=V52uNDNvarcK_FXnWrPZjifFMRWTy_2H4lt3FmvA4W4,7760 +sympy/calculus/tests/test_singularities.py,sha256=zVCHJyjVFw9xpQ_EFCsA33zBGwCQ8gSeLtbLGA9t0uQ,4215 +sympy/calculus/tests/test_util.py,sha256=S5_YEGW0z7xzzthShrSsg2wAmzE9mR4u4Ndzuzw_Gx8,15034 +sympy/calculus/util.py,sha256=ViXMvleQIIStquHN01CpTUPYxu3jgC57GaCOkuXRsoU,26097 +sympy/categories/__init__.py,sha256=XiKBVC6pbDED-OVtNlSH-fGB8dB_jWLqwCEO7wBTAyA,984 +sympy/categories/__pycache__/__init__.cpython-310.pyc,, 
+sympy/categories/__pycache__/baseclasses.cpython-310.pyc,, +sympy/categories/__pycache__/diagram_drawing.cpython-310.pyc,, +sympy/categories/baseclasses.py,sha256=G3wCiNCgNiTLLFZxGLd2ZFmnsbiRxhapSfZWlWSC508,31411 +sympy/categories/diagram_drawing.py,sha256=W88A89uDs8qKZlxVLqWuqmEOBwTMomtl_u8sFe9wqdU,95500 +sympy/categories/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/categories/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/categories/tests/__pycache__/test_baseclasses.cpython-310.pyc,, +sympy/categories/tests/__pycache__/test_drawing.cpython-310.pyc,, +sympy/categories/tests/test_baseclasses.py,sha256=SwD6QsfSlrEdpD2dbkcN62CPVIRP5SadjCplLrMAoa8,5767 +sympy/categories/tests/test_drawing.py,sha256=IELPpadmnQyQ2x5a5qHC8ioq5kfT1UnAl4h1vO3gbqg,27848 +sympy/codegen/__init__.py,sha256=sQcJsyLyoRh9ccOPhv2eZ-wHjQrArByOON9ndj-MYgQ,974 +sympy/codegen/__pycache__/__init__.cpython-310.pyc,, +sympy/codegen/__pycache__/abstract_nodes.cpython-310.pyc,, +sympy/codegen/__pycache__/algorithms.cpython-310.pyc,, +sympy/codegen/__pycache__/approximations.cpython-310.pyc,, +sympy/codegen/__pycache__/ast.cpython-310.pyc,, +sympy/codegen/__pycache__/cfunctions.cpython-310.pyc,, +sympy/codegen/__pycache__/cnodes.cpython-310.pyc,, +sympy/codegen/__pycache__/cutils.cpython-310.pyc,, +sympy/codegen/__pycache__/cxxnodes.cpython-310.pyc,, +sympy/codegen/__pycache__/fnodes.cpython-310.pyc,, +sympy/codegen/__pycache__/futils.cpython-310.pyc,, +sympy/codegen/__pycache__/matrix_nodes.cpython-310.pyc,, +sympy/codegen/__pycache__/numpy_nodes.cpython-310.pyc,, +sympy/codegen/__pycache__/pynodes.cpython-310.pyc,, +sympy/codegen/__pycache__/pyutils.cpython-310.pyc,, +sympy/codegen/__pycache__/rewriting.cpython-310.pyc,, +sympy/codegen/__pycache__/scipy_nodes.cpython-310.pyc,, +sympy/codegen/abstract_nodes.py,sha256=TY4ecftqnym5viYInnb59zGPPFXdeSGQwi--xTz6Pvo,490 +sympy/codegen/algorithms.py,sha256=_isSQBzQzn1xKkYhYEF7nVK1sCa7n78Qo5AoCeNs8eU,5056 +sympy/codegen/approximations.py,sha256=UnVbikz2vjJo8DtE02ipa6ZEsCe5lXOT_r16F5ByW4Q,6447 +sympy/codegen/ast.py,sha256=tBRSHBvDz4_Z_FiFy1d48x1URHPtAVCJUiwQihpc5zA,56374 +sympy/codegen/cfunctions.py,sha256=SGLPIMgGE9o9RhaThTgVcmnFCKbxNZvukqp3uvqv0Vw,11812 +sympy/codegen/cnodes.py,sha256=ZFBxHsRBUcQ14EJRURZXh9EjTsSSJGwmWubfmpE0-p4,2823 +sympy/codegen/cutils.py,sha256=vlzMs8OkC5Bu4sIP-AF2mYf_tIo7Uo4r2DAI_LNhZzM,383 +sympy/codegen/cxxnodes.py,sha256=Om-EBfYduFF97tgXOF68rr8zYbngem9kBRm9SJiKLSM,342 +sympy/codegen/fnodes.py,sha256=P7I-TD-4H4Dr4bxFNS7p46OD9bi32l8SpFEezVWutSY,18931 +sympy/codegen/futils.py,sha256=k-mxMJKr_Q_afTy6NrKNl_N2XQLBmSdZAssO5hBonNY,1792 +sympy/codegen/matrix_nodes.py,sha256=Hhip0cbBj27i-4JwVinkEt4PHRbAIe5ERxwyywoSJm8,2089 +sympy/codegen/numpy_nodes.py,sha256=23inRIlvAF2wzaJGhi1NUg8R7NRbhtDrqICDZN909jw,3137 +sympy/codegen/pynodes.py,sha256=Neo1gFQ9kC31T-gH8TeeCaDDNaDe5deIP97MRZFgMHk,243 +sympy/codegen/pyutils.py,sha256=HfF6SP710Y7yExZcSesI0usVaDiWdEPEmMtyMD3JtOY,838 +sympy/codegen/rewriting.py,sha256=EeSOC-fawTxFiueMIuMlSFPuES_97hhxC2hjoZ_6pPQ,11591 +sympy/codegen/scipy_nodes.py,sha256=hYlxtGyTM0Z64Nazm1TeMZ3Y8dMsiD_HNhNvbU9eiQY,2508 +sympy/codegen/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/codegen/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_abstract_nodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_algorithms.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_applications.cpython-310.pyc,, 
+sympy/codegen/tests/__pycache__/test_approximations.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_ast.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_cfunctions.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_cnodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_cxxnodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_fnodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_numpy_nodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_pynodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_pyutils.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_rewriting.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_scipy_nodes.cpython-310.pyc,, +sympy/codegen/tests/test_abstract_nodes.py,sha256=a_GKf3FpeNN8zfMc-V8AaSrQtEI1oiLfJOco2VKiSKI,451 +sympy/codegen/tests/test_algorithms.py,sha256=gvDTHZnC_lZ4Uvt7BTSfjMuDTyM0Bilm-sWMUpSM06I,4700 +sympy/codegen/tests/test_applications.py,sha256=DWDpSsiVQy7S6pjnBSErWxDpPDRRLL8ncTMWWwaI3R4,2189 +sympy/codegen/tests/test_approximations.py,sha256=SZpOUzahb_bJOceD0DLdmeiw-jN37OPmf5TRp1dyRgM,2035 +sympy/codegen/tests/test_ast.py,sha256=aAWk-yAVVNAmFMkyUlYBbVA8mPlTFqULOtmXMEi3LO8,21688 +sympy/codegen/tests/test_cfunctions.py,sha256=EuRwj9U00iLc2--qtY2YD7TpICndQ0gVsCXTYHrIFhQ,4613 +sympy/codegen/tests/test_cnodes.py,sha256=FlI5XP39K3kC1QWKQ-QKkzNQw8TROjj5mKXJhK1UU2c,3039 +sympy/codegen/tests/test_cxxnodes.py,sha256=5OwN8D_ZtKN9z5uNeUwbUkyAGzNLrTgIKUlcRWmOSpE,366 +sympy/codegen/tests/test_fnodes.py,sha256=r206n8YM0D1vFP0vdjUaAR7QRpmUWw8VmqSMFxh8FU8,6643 +sympy/codegen/tests/test_numpy_nodes.py,sha256=VcG7eGVlzx9sSKRp1n9zfK0NjigxY5WOW6F_nQnnnSs,1658 +sympy/codegen/tests/test_pynodes.py,sha256=Gso18KKzSwA-1AHC55SgHPAfH1GrGUCGaN6QR7iuEO0,432 +sympy/codegen/tests/test_pyutils.py,sha256=jr5QGvUP0M1Rr2_7vHTazlMaJOoMHztqFTxT6EkBcb4,285 +sympy/codegen/tests/test_rewriting.py,sha256=ELPziNI3CsJ4VS7mUbk4QWyG_94FbgZCdBKieMN20Vc,15852 +sympy/codegen/tests/test_scipy_nodes.py,sha256=LBWpjTRfgWN5NLTchLZEp6m7IMtu7HbiKoztLc6KNGY,1495 +sympy/combinatorics/__init__.py,sha256=Dx9xakpHuTIgy4G8zVjAY6pTu8J9_K3d_jKPizRMdVo,1500 +sympy/combinatorics/__pycache__/__init__.cpython-310.pyc,, +sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc,, +sympy/combinatorics/__pycache__/fp_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/galois.cpython-310.pyc,, +sympy/combinatorics/__pycache__/generators.cpython-310.pyc,, +sympy/combinatorics/__pycache__/graycode.cpython-310.pyc,, +sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc,, +sympy/combinatorics/__pycache__/group_numbers.cpython-310.pyc,, +sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc,, +sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/partitions.cpython-310.pyc,, +sympy/combinatorics/__pycache__/pc_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/permutations.cpython-310.pyc,, +sympy/combinatorics/__pycache__/polyhedron.cpython-310.pyc,, +sympy/combinatorics/__pycache__/prufer.cpython-310.pyc,, +sympy/combinatorics/__pycache__/rewritingsystem.cpython-310.pyc,, +sympy/combinatorics/__pycache__/rewritingsystem_fsm.cpython-310.pyc,, +sympy/combinatorics/__pycache__/schur_number.cpython-310.pyc,, +sympy/combinatorics/__pycache__/subsets.cpython-310.pyc,, +sympy/combinatorics/__pycache__/tensor_can.cpython-310.pyc,, 
+sympy/combinatorics/__pycache__/testutil.cpython-310.pyc,, +sympy/combinatorics/__pycache__/util.cpython-310.pyc,, +sympy/combinatorics/coset_table.py,sha256=A3O5l1tkFmF1mEqiab08eBcR6lAdiqKJ2uPao3Ucvlk,42935 +sympy/combinatorics/fp_groups.py,sha256=QjeCEGBfTBbMZd-WpCOY5iEUyt8O7eJXa3RDLfMC7wk,47800 +sympy/combinatorics/free_groups.py,sha256=OnsEnMF6eehIFdM5m7RHkc9R_LFIahGJL3bAEv1pR6k,39534 +sympy/combinatorics/galois.py,sha256=0kz71xGJDKgJm-9dXr4YTMkfaHPowCUImpK9x-n3VNU,17863 +sympy/combinatorics/generators.py,sha256=vUIe0FgHGVFA5omJH-qHQP6NmqmnuVVV8n2RFnpTrKc,7481 +sympy/combinatorics/graycode.py,sha256=xbtr8AaFYb4SMmwUi7mf7913U87jH-XEYF_3pGZfj0o,11207 +sympy/combinatorics/group_constructs.py,sha256=IKx12_yWJqEQ7g-oBuAWd5VRLbCOWyL0LG4PQu43BS8,2021 +sympy/combinatorics/group_numbers.py,sha256=QuB-EvXmTulg5MuI4aLE3GlmFNTGKulAP-DQW9TBXU4,3073 +sympy/combinatorics/homomorphisms.py,sha256=s8bzIv4liVXwqJT2IuYPseQW4MBW2-zDpdHUXQsf7dU,18828 +sympy/combinatorics/named_groups.py,sha256=zd_C9epKDrMG0drafGUcHuuJJkcMaDt1Nf2ik4NXNq8,8378 +sympy/combinatorics/partitions.py,sha256=ZXqVmVNjmauhMeiTWtCCqOP38b9MJg7UlBdZa-7aICQ,20841 +sympy/combinatorics/pc_groups.py,sha256=IROCLM63p4ATazWsK9qRxmx8bZjoMhWxOrTm0Q5RRpo,21351 +sympy/combinatorics/perm_groups.py,sha256=mhAE82DSVM7x2YoS4ADdwLoWxzuGLVOjeaVGJnz9EY8,185087 +sympy/combinatorics/permutations.py,sha256=2f63LyIytpdDUbPyv44DqcGUJxtbfMEJFpyGuSq4xoY,87647 +sympy/combinatorics/polyhedron.py,sha256=OYRMNVwTxT97p4sG4EScl4a2QnBIvyutIPFBzxAfCLU,35942 +sympy/combinatorics/prufer.py,sha256=v-lHZN2ZhjOTS3_jLjw44Q9F7suS3VdgXThh1Sg6CRI,12086 +sympy/combinatorics/rewritingsystem.py,sha256=XTQUZpLIr6H1UBLao_ni1UAoIMB8V5Bpfp8BBCV9g5c,17097 +sympy/combinatorics/rewritingsystem_fsm.py,sha256=CKGhLqyvxY0mlmy8_Hb4WzkSdWYPUaU2yZYhz-0iZ5w,2433 +sympy/combinatorics/schur_number.py,sha256=YdsyA7n_z9tyfRTSRfIjEjtnGo5EuDGBMUS09AQ2MxU,4437 +sympy/combinatorics/subsets.py,sha256=oxuExuGyFnvunkmktl-vBYiLbiN66A2Q2MyzwWfy46A,16047 +sympy/combinatorics/tensor_can.py,sha256=h6NTaH99oG0g1lVxhShBY2Fc4IwXyMUc0Ih31KI6kFw,40776 +sympy/combinatorics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/combinatorics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_coset_table.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_fp_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_free_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_galois.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_generators.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_graycode.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_group_constructs.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_group_numbers.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_homomorphisms.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_named_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_partitions.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_pc_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_perm_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_permutations.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_polyhedron.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_prufer.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_rewriting.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_schur_number.cpython-310.pyc,, 
+sympy/combinatorics/tests/__pycache__/test_subsets.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_tensor_can.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_testutil.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/combinatorics/tests/test_coset_table.py,sha256=cEUF0OH6SNhN_kh069wMsq6h4eSVqbDLghrg2r9Ht48,28474 +sympy/combinatorics/tests/test_fp_groups.py,sha256=7ATMwzPvAoWiH7Cex-D63nmlOa20h70zO5TWGVisFwM,9969 +sympy/combinatorics/tests/test_free_groups.py,sha256=h3tPyjMA79M9QMc0rOlgVXU31lZ0s_xoY_YIVsVz0Fg,6161 +sympy/combinatorics/tests/test_galois.py,sha256=w35JRx8lmlXCdzUBNdocgATPYWBOEZ6LH-tAxOPwCQ8,2763 +sympy/combinatorics/tests/test_generators.py,sha256=6YpOp0i5PRGtySPNZseQ8mjSXbwpfGfz0hDB4kfk40Q,3567 +sympy/combinatorics/tests/test_graycode.py,sha256=pI4e7Y615d5Bmmxui6fdEeyca6j6KSD0YmeychV6ORk,2800 +sympy/combinatorics/tests/test_group_constructs.py,sha256=jJLwMdhuUalKv4Aql9SzV2utK8Ex-IYdMecggr95pi8,450 +sympy/combinatorics/tests/test_group_numbers.py,sha256=nRxK4R8Cdq4Ni9e_6n4fRjir3VBOmXMzAIXnlRNQD3Y,989 +sympy/combinatorics/tests/test_homomorphisms.py,sha256=UwBj5loCuZAiuvmqy5VAbwhCQTph8o6BzTaGrH0rzB4,3745 +sympy/combinatorics/tests/test_named_groups.py,sha256=tsuDVGv4iHGEZ0BVR87_ENhyAfZvFIl0M6Dv_HX1VoY,1931 +sympy/combinatorics/tests/test_partitions.py,sha256=oppszKJLLSpcEzHgespIveSmEC3fDZ0qkus1k7MBt4E,4097 +sympy/combinatorics/tests/test_pc_groups.py,sha256=wfkY_ilpG0XWrhaWMVK6r7yWMeXfM8WNTyti5oE9bdk,2728 +sympy/combinatorics/tests/test_perm_groups.py,sha256=t-bERPQXU4pKAEHR3caHemGMnQ2qh9leIOz0-hB8vjo,41191 +sympy/combinatorics/tests/test_permutations.py,sha256=IfOxSCY18glt_8lqovnjtXyz9OX02ZQaUE47aCUzKIA,20149 +sympy/combinatorics/tests/test_polyhedron.py,sha256=3SWkFQKeF-p1QWP4Iu9NIA1oTxAFo1BLRrrLerBFAhw,4180 +sympy/combinatorics/tests/test_prufer.py,sha256=OTJp0NxjiVswWkOuCIlnGFU2Gw4noRsrPpUJtp2XhEs,2649 +sympy/combinatorics/tests/test_rewriting.py,sha256=3COHq74k6knt2rqE7hfd4ZP_6whf0Kg14tYxFmTtYrI,1787 +sympy/combinatorics/tests/test_schur_number.py,sha256=wg13uTumFltWIGbVg_PEr6nhXIru19UWitsEZiakoRI,1727 +sympy/combinatorics/tests/test_subsets.py,sha256=6pyhLYV5HuXvx63r-gGVHr8LSrGRXcpDudhFn9fBqX8,2635 +sympy/combinatorics/tests/test_tensor_can.py,sha256=olH5D5wwTBOkZXjtqvLO6RKbvCG9KoMVK4__wDe95N4,24676 +sympy/combinatorics/tests/test_testutil.py,sha256=uJlO09XgD-tImCWu1qkajiC07rK3GoN91v3_OqT5-qo,1729 +sympy/combinatorics/tests/test_util.py,sha256=sOYMWHxlbM0mqalqA7jNrYMm8DKcf_GwL5YBjs96_C4,4499 +sympy/combinatorics/testutil.py,sha256=Nw0En7kI9GMjca287aht1HNaTjBFv8ulq0E1rgtpO6Q,11152 +sympy/combinatorics/util.py,sha256=LIu_8__RKMv8EfXAfkr08UKYSMq5hGJBLHyDSS5nd-8,16297 +sympy/concrete/__init__.py,sha256=2HDmg3VyLgM_ZPw3XsGpkOClGiQnyTlUNHSwVTtizA0,144 +sympy/concrete/__pycache__/__init__.cpython-310.pyc,, +sympy/concrete/__pycache__/delta.cpython-310.pyc,, +sympy/concrete/__pycache__/expr_with_intlimits.cpython-310.pyc,, +sympy/concrete/__pycache__/expr_with_limits.cpython-310.pyc,, +sympy/concrete/__pycache__/gosper.cpython-310.pyc,, +sympy/concrete/__pycache__/guess.cpython-310.pyc,, +sympy/concrete/__pycache__/products.cpython-310.pyc,, +sympy/concrete/__pycache__/summations.cpython-310.pyc,, +sympy/concrete/delta.py,sha256=xDtz1yXnd-WRIu3nnJFBIrA01PLOUT3XU1znPeVATU0,9958 +sympy/concrete/expr_with_intlimits.py,sha256=vj4PjttB9xE5aUYu37R1A4_KtGgxcPa65jzjv8-krsc,11352 +sympy/concrete/expr_with_limits.py,sha256=txn7gbh-Yqw0-ZBGvN9iFNsPW13wD2z7alf8EyQVZ4U,21832 
+sympy/concrete/gosper.py,sha256=3q8gkZz_oAeBOBUfObMvwArBkBKYReHR0prVXMIqrNE,5557 +sympy/concrete/guess.py,sha256=Ha12uphLNfo3AbfsGy85JsPxhbiAXJemwpz9QXRtp48,17472 +sympy/concrete/products.py,sha256=s6E_Z0KuHx8MzbJzaJo2NP5aTpgIo3-oqGwgYh_osnE,18608 +sympy/concrete/summations.py,sha256=jhmU5WCz98Oon3oosHUsM8sp6ErjPGCz25rbKn5hqS8,55371 +sympy/concrete/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/concrete/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_delta.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_gosper.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_guess.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_products.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_sums_products.cpython-310.pyc,, +sympy/concrete/tests/test_delta.py,sha256=uI7xjMx7JuVb3kkN7cLR6_pGsKS4Ulq22p-Z9oti5Jc,23869 +sympy/concrete/tests/test_gosper.py,sha256=ZHiZfYGCeCS9I-0oqN6sFbiYa-284GeFoGsNbhIWq4I,7987 +sympy/concrete/tests/test_guess.py,sha256=TPW6Hy11Po6VLZG_dx95x3sMBYl5kcQH8wjJ6TOtu-k,3370 +sympy/concrete/tests/test_products.py,sha256=caYc-xlEIrX9I_A-KPQdwp5oDprVJSbfcOaKg_qUnsM,14521 +sympy/concrete/tests/test_sums_products.py,sha256=0ti3g4D8hBpvpsSrc2CYIRxVwqLORKO5K88offDwKfM,64458 +sympy/conftest.py,sha256=3vg-GlDw8Y8MGoa324FoRJR3HaRaJhZpiXdTTVoNAoI,2245 +sympy/core/__init__.py,sha256=LQBkB1S-CYmQ3P24ei_kHcsMwtbDobn3BqzJQ-rJ1Hs,3050 +sympy/core/__pycache__/__init__.cpython-310.pyc,, +sympy/core/__pycache__/_print_helpers.cpython-310.pyc,, +sympy/core/__pycache__/add.cpython-310.pyc,, +sympy/core/__pycache__/alphabets.cpython-310.pyc,, +sympy/core/__pycache__/assumptions.cpython-310.pyc,, +sympy/core/__pycache__/assumptions_generated.cpython-310.pyc,, +sympy/core/__pycache__/backend.cpython-310.pyc,, +sympy/core/__pycache__/basic.cpython-310.pyc,, +sympy/core/__pycache__/cache.cpython-310.pyc,, +sympy/core/__pycache__/compatibility.cpython-310.pyc,, +sympy/core/__pycache__/containers.cpython-310.pyc,, +sympy/core/__pycache__/core.cpython-310.pyc,, +sympy/core/__pycache__/coreerrors.cpython-310.pyc,, +sympy/core/__pycache__/decorators.cpython-310.pyc,, +sympy/core/__pycache__/evalf.cpython-310.pyc,, +sympy/core/__pycache__/expr.cpython-310.pyc,, +sympy/core/__pycache__/exprtools.cpython-310.pyc,, +sympy/core/__pycache__/facts.cpython-310.pyc,, +sympy/core/__pycache__/function.cpython-310.pyc,, +sympy/core/__pycache__/kind.cpython-310.pyc,, +sympy/core/__pycache__/logic.cpython-310.pyc,, +sympy/core/__pycache__/mod.cpython-310.pyc,, +sympy/core/__pycache__/mul.cpython-310.pyc,, +sympy/core/__pycache__/multidimensional.cpython-310.pyc,, +sympy/core/__pycache__/numbers.cpython-310.pyc,, +sympy/core/__pycache__/operations.cpython-310.pyc,, +sympy/core/__pycache__/parameters.cpython-310.pyc,, +sympy/core/__pycache__/power.cpython-310.pyc,, +sympy/core/__pycache__/random.cpython-310.pyc,, +sympy/core/__pycache__/relational.cpython-310.pyc,, +sympy/core/__pycache__/rules.cpython-310.pyc,, +sympy/core/__pycache__/singleton.cpython-310.pyc,, +sympy/core/__pycache__/sorting.cpython-310.pyc,, +sympy/core/__pycache__/symbol.cpython-310.pyc,, +sympy/core/__pycache__/sympify.cpython-310.pyc,, +sympy/core/__pycache__/trace.cpython-310.pyc,, +sympy/core/__pycache__/traversal.cpython-310.pyc,, +sympy/core/_print_helpers.py,sha256=GQo9dI_BvAJtYHVFFfmroNr0L8d71UeI-tU7SGJgctk,2388 +sympy/core/add.py,sha256=9VDeDODPv3Y72EWa4Xiypy3i67DzbNlPUYAEZXhEwEw,43747 
+sympy/core/alphabets.py,sha256=vWBs2atOvfRK6Xfg6hc5IKiB7s_0sZIiVJpcCUJL0N4,266 +sympy/core/assumptions.py,sha256=P7c11DL5VD_94v1Dc5LofIy6Atrth7FZp03rDr4ftQ4,23582 +sympy/core/assumptions_generated.py,sha256=0TJKYIHSIFyQcVHZdIHZ19b7tqst_sY7iZwjKzcvZBM,42817 +sympy/core/backend.py,sha256=AUgGtYmz0mIoVmjKVMAa5ZzlC1p5anxk-N4Sy7pePNo,3842 +sympy/core/basic.py,sha256=1wRiJLAILhJK2uVTAtuxlCFWKXCKT-PECXve4rfXWs0,72857 +sympy/core/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/core/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_arit.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_assumptions.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_basic.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_expand.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_numbers.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_sympify.cpython-310.pyc,, +sympy/core/benchmarks/bench_arit.py,sha256=gfrnvKSXLCaUoFFxMgJhnLUp7rG9Pa_YT7OKgOrPP8E,412 +sympy/core/benchmarks/bench_assumptions.py,sha256=evfZzTgOUUvvvlK0DRdDZQRqxIlGLfJYzKu8QDMxSks,177 +sympy/core/benchmarks/bench_basic.py,sha256=YF0tTJ_AN_Wz11qidzM4bIhlwEhEqVc-IGVGrUx6SaA,210 +sympy/core/benchmarks/bench_expand.py,sha256=xgQYQMwqgXJtKajM4JVhuL-7AW8TLY-vdBpO6uyMDoQ,427 +sympy/core/benchmarks/bench_numbers.py,sha256=fvcbOkslXdADqiX_amiL-BEUtrXBfdiTZeOtbiI2auI,1105 +sympy/core/benchmarks/bench_sympify.py,sha256=G5iGInhhbkkxSY2pS08BNG945m9m4eZlNT1aJutGt5M,138 +sympy/core/cache.py,sha256=AyG7kganyV0jVx-aNBEUFogqRLHQqqFn8xU3ZSfJoaM,6172 +sympy/core/compatibility.py,sha256=XQH7ezmRi6l3R23qMHN2wfA-YMRWbh2YYjPY7LRo3lo,1145 +sympy/core/containers.py,sha256=ic6uSNItz5JgL8Dx8T87gcnpiGwOxvf6FaQVgIRWWoo,11315 +sympy/core/core.py,sha256=3pIrJokfb2Rn8S2XudM3JyQVEqY1vZhSEZ-1tkUmqYg,1797 +sympy/core/coreerrors.py,sha256=OKpJwk_yE3ZMext49R-QwtTudZaXZbmTspaq1ZMMpAU,272 +sympy/core/decorators.py,sha256=de6eYm3D_YdEW1rEKOIES_aEyvbjqRM98I67l8QGGVU,8217 +sympy/core/evalf.py,sha256=HL9frdDL3OXiF08CXISADkmCx7_KjcAt_nYu4m_IKyM,61889 +sympy/core/expr.py,sha256=_lGEDOkQX57uMh275-NGY3Mus6lrQP-cCW_b6xngy_w,142568 +sympy/core/exprtools.py,sha256=mCUxyyQZDSceU7eHPxV3C0mBUWI4a2Qz_LhZxJ5FXY8,51459 +sympy/core/facts.py,sha256=54pFKhJwEzU8LkO7rL25TwGjIb5y5CvZleHEy_TpD68,19546 +sympy/core/function.py,sha256=TuxxpFyc9y5s5dQH3hZnjEovhoZM0nDQNPjfKw5I4ug,115552 +sympy/core/kind.py,sha256=9kQvtDxm-SSRGi-155XsBl_rs-oN_7dw7fNNT3mDu2Q,11540 +sympy/core/logic.py,sha256=Ai2_N-pUmHngJN3usiMTNO6kfLWFVQa3WOet3VhehE8,10865 +sympy/core/mod.py,sha256=survk3e5EyNifVHKpqLZ5NUobFdS0-wEYN4XoUkzMI8,7484 +sympy/core/mul.py,sha256=d7TAZK5YQWT7dsHt84y-2K9Q17FUxi6ilpfgd0GPZ30,78458 +sympy/core/multidimensional.py,sha256=NWX1okybO_nZCl9IhIOE8QYalY1WoC0zlzsvBg_E1eE,4233 +sympy/core/numbers.py,sha256=yNkmRw8ehaQWREJAYv61YP2pGkXy1yAo7ehGrXTVamY,139169 +sympy/core/operations.py,sha256=vasCAsT4aU9XJxfrEGjL-zeVIl2FsI1ktzVtPaJq_0c,25185 +sympy/core/parameters.py,sha256=09LVewtoOyKABQvYeMaJuc-HG7TjJusyT_WMw5NQDDs,3733 +sympy/core/power.py,sha256=WYVmJPNPFsaxeec2D2M_Tb9vUrIG3K8CiAqHca1YVPE,77148 +sympy/core/random.py,sha256=miFdVpNKfutbkpYiIOzG9kVNUm5GTk-_nnmQqUhVDZs,6647 +sympy/core/relational.py,sha256=XcPZ8xUKl8pMAcGk9OBYssCcTH-7lueak2WrsTpzs8g,50608 +sympy/core/rules.py,sha256=AJuZztmYKZ_yUITLZB6rhZjDy6ROBCtajcYqPa50sjc,1496 +sympy/core/singleton.py,sha256=0TrQk5Q4U-GvSXTe4Emih6B2JJg2WMu_u0pSj92wqVA,6542 +sympy/core/sorting.py,sha256=ynZfmQPXWq5Te6WOz6CzaR8crlJfcfKTP24gzVf-QF0,10671 
+sympy/core/symbol.py,sha256=eciLIZCLMlmBKBF5XcJqVRYXf2Z3M13kQ3dJ_-ok43g,28555 +sympy/core/sympify.py,sha256=pZuEWvH-kcUGNq0epaVm11G8cmXZQtMyoeoywBVcbYU,20399 +sympy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/core/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_args.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_arit.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_assumptions.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_basic.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_cache.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_compatibility.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_complex.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_constructor_postprocessor.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_containers.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_count_ops.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_diff.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_equal.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_eval.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_evalf.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_expand.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_expr.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_exprtools.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_facts.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_function.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_kind.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_logic.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_match.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_multidimensional.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_noncommutative.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_numbers.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_operations.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_parameters.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_power.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_priority.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_random.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_relational.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_rules.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_singleton.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_sorting.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_subs.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_symbol.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_sympify.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_traversal.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_truediv.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_var.cpython-310.pyc,, +sympy/core/tests/test_args.py,sha256=IeGS8dWg2nM8LncK-_XH4yuCyoBjSIHgemDGEpiVEnc,178389 +sympy/core/tests/test_arit.py,sha256=DwlTHtg2BllVwn0lGNJs89TsKgeAf7wdrXCZR7BkfGo,77847 +sympy/core/tests/test_assumptions.py,sha256=MjJdF_ymVL6mtgQx-aSr_rsNNxaTi2pHFLjyaPCBq5Q,41573 +sympy/core/tests/test_basic.py,sha256=cgAhl2-bLXBkx2EaV5KtnY7-MKOEL9Mov25JUoAmLSo,9496 +sympy/core/tests/test_cache.py,sha256=p6Ci75a_T-bBXE_5HVxRKla62uSay_0Vuf57gUuH6sI,2001 +sympy/core/tests/test_compatibility.py,sha256=7pvNUEGIcRrfWl3doqHlm3AdNkGlcChO69gos3Fk09A,240 +sympy/core/tests/test_complex.py,sha256=koNGFMt6UMmzahJADSja_eD24gr-GG5gGCtyDgCRtPI,21906 +sympy/core/tests/test_constructor_postprocessor.py,sha256=0d7vbVuKi3GCm3PKLtiNqv_Au7v6RYt1rzRdHiD08tM,2441 
+sympy/core/tests/test_containers.py,sha256=bFaqu8Bu82-rpgpNEPU4-R3rGwhqNdlLlWCqtHsBqN0,7434 +sympy/core/tests/test_count_ops.py,sha256=eIA2WvCuWKXVBJEGfWoJrn6WfUshX_NXttrrfyLbNnI,5665 +sympy/core/tests/test_diff.py,sha256=6j4Vk9UCNRv8Oyx_4iv1ePjocwBg7_-3ftrSJ8u0cPo,5421 +sympy/core/tests/test_equal.py,sha256=RoOJuu4kMe4Rkk7eNyVOJov5S1770YHiVAiziNIKd2o,1678 +sympy/core/tests/test_eval.py,sha256=o0kZn3oaMidVYdNjeZYtx4uUKBoE3A2tWn2NS4hu72Q,2366 +sympy/core/tests/test_evalf.py,sha256=ShOta18xc-jFlSnnlHhyWsDumLyQRr91YiC1j_gL9Sw,28307 +sympy/core/tests/test_expand.py,sha256=-Rl7sRQevvVBMck3jSA8kg6jgvWeI2yxh9cbSuy0fOA,13383 +sympy/core/tests/test_expr.py,sha256=RRZ7r-AltCCz7Cxfun8is5xVVUklXjbBfDVDoFopAf0,76520 +sympy/core/tests/test_exprtools.py,sha256=L7fi319z1EeFag6pH8myqDQYQ32H193QLKMdqlxACsY,19021 +sympy/core/tests/test_facts.py,sha256=YEZMZ-116VFnFqJ48h9bQsF2flhiB65trnZvJsRSh_o,11579 +sympy/core/tests/test_function.py,sha256=vVoXYyGzdTO3EtlRu0sONxjB3fprXxZ7_9Ve6HdH84s,51420 +sympy/core/tests/test_kind.py,sha256=NLJbwCpugzlNbaSyUlbb6NHoT_9dHuoXj023EDQMrNI,2048 +sympy/core/tests/test_logic.py,sha256=_YKSIod6Q0oIz9lDs78UQQrv9LU-uKaztd7w8LWwuwY,5634 +sympy/core/tests/test_match.py,sha256=2ewD4Ao9cYNvbt2TAId8oZCU0GCNWsSDx4qO5-_Xhwc,22716 +sympy/core/tests/test_multidimensional.py,sha256=Fr-lagme3lwLrBpdaWP7O7oPezhIatn5X8fYYs-8bN8,848 +sympy/core/tests/test_noncommutative.py,sha256=IkGPcvLO4ACVj5LMT2IUgyj68F1RBvMKbm01iqTOK04,4436 +sympy/core/tests/test_numbers.py,sha256=AgFd3RJAMakI6AxCDzfOrGgSX7UeAjxvPHs3Rzk2ns4,75434 +sympy/core/tests/test_operations.py,sha256=mRxftKlrxxrn3zS3UPwqkF6Nr15l5Cv6j3c2RJX46s4,2859 +sympy/core/tests/test_parameters.py,sha256=lRZSShirTW7GRfYgU3A3LRlW79xEPqi62XtoJeaMuDs,2799 +sympy/core/tests/test_power.py,sha256=LptUWHOYrFfNg1-8cNEMxDoQzCdDtguihgVoGb0QC9M,24434 +sympy/core/tests/test_priority.py,sha256=g9dGW-qT647yL4uk1D_v3M2S8rgV1Wi4JBUFyTSwUt4,3190 +sympy/core/tests/test_random.py,sha256=H58NfH5BYeQ3RIscbDct6SZkHQVRJjichVUSuSrhvAU,1233 +sympy/core/tests/test_relational.py,sha256=jebPjr32VQsL-W3laOMxKuYkyo9SFpkdXrTFfqDL3e4,42972 +sympy/core/tests/test_rules.py,sha256=iwmMX7hxC_73CuX9BizeAci-cO4JDq-y1sicKBXEGA4,349 +sympy/core/tests/test_singleton.py,sha256=xLJJgXwmkbKhsot_qTs-o4dniMjHUh3_va0xsA5h-KA,3036 +sympy/core/tests/test_sorting.py,sha256=6BZKYqUedAR-jeHcIgsJelJHFWuougml2c1NNilxGZg,902 +sympy/core/tests/test_subs.py,sha256=7ITJFDplgWBRImkcHfjRdnHqaKgjTxWb4j4WoRysvR8,30106 +sympy/core/tests/test_symbol.py,sha256=zYhPWsdyQp7_NiLVthpoCB1RyP9pmJcNlTdTN2kMdfY,13043 +sympy/core/tests/test_sympify.py,sha256=gVUNWYtarpDrx3vk4r0Vjnrijr21YgHUUSfJmeyabCo,27866 +sympy/core/tests/test_traversal.py,sha256=cmgvMW8G-LZ20ZXy-wg5Vz5ogI_oq2p2bJSwMy9IMF0,4311 +sympy/core/tests/test_truediv.py,sha256=RYfJX39-mNhekRE3sj5TGFZXKra4ML9vGvObsRYuD3k,854 +sympy/core/tests/test_var.py,sha256=hexP-0q2nN9h_dyhKLCuvqFXgLC9e_Hroni8Ldb16Ko,1594 +sympy/core/trace.py,sha256=9WC8p3OpBL6TdHmZWMDK9jaCG-16f4uZV2VptduVH98,348 +sympy/core/traversal.py,sha256=M-ZMt-DRUgyZed_I1gikxEbSYEJLwi7mwpjd-_iFKC8,8962 +sympy/crypto/__init__.py,sha256=i8GcbScXhIPbMEe7uuMgXqh_cU2mZm2f6hspIgmW5uM,2158 +sympy/crypto/__pycache__/__init__.cpython-310.pyc,, +sympy/crypto/__pycache__/crypto.cpython-310.pyc,, +sympy/crypto/crypto.py,sha256=Qb0O_f78q-CtHabvHS7VRJmncbkuqowWTF3_drmMgxI,89426 +sympy/crypto/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/crypto/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/crypto/tests/__pycache__/test_crypto.cpython-310.pyc,, 
+sympy/crypto/tests/test_crypto.py,sha256=-GJYezqcuQ3KUq_IqCEJAWa-zWAPWFku2WdLj7Aonrc,19763 +sympy/diffgeom/__init__.py,sha256=cWj4N7AfNgrYcGIBexX-UrWxfd1bP9DTNqUmLWUJ9nA,991 +sympy/diffgeom/__pycache__/__init__.cpython-310.pyc,, +sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc,, +sympy/diffgeom/__pycache__/rn.cpython-310.pyc,, +sympy/diffgeom/diffgeom.py,sha256=CCkZEwNcJYrmhyuBVr94KwMFjHsbL6mOJZ2f5aGcARU,72322 +sympy/diffgeom/rn.py,sha256=kvgth6rNJWt94kzVospZwiH53C-s4VSiorktQNmMobQ,6264 +sympy/diffgeom/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc,, +sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc,, +sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc,, +sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc,, +sympy/diffgeom/tests/test_class_structure.py,sha256=LbRyxhhp-NnnfJ2gTn1SdlgCBQn2rhyB7xApOgcd_rM,1048 +sympy/diffgeom/tests/test_diffgeom.py,sha256=3BepCr6ned-4C_3me4zScu06HXG9Qx_dBBxIpiXAvy4,14145 +sympy/diffgeom/tests/test_function_diffgeom_book.py,sha256=0YU63iHyY6O-4LR9lRS5kLZMpcMpuNxEsgqtXALV7ic,5258 +sympy/diffgeom/tests/test_hyperbolic_space.py,sha256=c4xQJ_bBS4xrMj3pfx1Ms3oC2_LwuJuNYXNZxs-cVG8,2598 +sympy/discrete/__init__.py,sha256=A_Seud0IRr2gPYlz6JMQZa3sBhRL3O7gVqhIvMRRvE0,772 +sympy/discrete/__pycache__/__init__.cpython-310.pyc,, +sympy/discrete/__pycache__/convolutions.cpython-310.pyc,, +sympy/discrete/__pycache__/recurrences.cpython-310.pyc,, +sympy/discrete/__pycache__/transforms.cpython-310.pyc,, +sympy/discrete/convolutions.py,sha256=xeXCLxPSpBNfrKNlPGGpuU3D9Azf0uR01OpDGCOAALg,14505 +sympy/discrete/recurrences.py,sha256=FqU5QG4qNNLSVBqcpL7HtKa7rQOlmHMXDQRzHZ_P_s0,5124 +sympy/discrete/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/discrete/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/discrete/tests/__pycache__/test_convolutions.cpython-310.pyc,, +sympy/discrete/tests/__pycache__/test_recurrences.cpython-310.pyc,, +sympy/discrete/tests/__pycache__/test_transforms.cpython-310.pyc,, +sympy/discrete/tests/test_convolutions.py,sha256=m6LrKCMIeNeuicfuMMFG3-Ke-7oyjTsD1QRbKdTRVYk,16626 +sympy/discrete/tests/test_recurrences.py,sha256=s5ZEZQ262gcnBLpCjJVmeKlTKQByRTQBrc-N9p_4W8c,3019 +sympy/discrete/tests/test_transforms.py,sha256=vEORFaPvxmPSsw0f4Z2hLEN1wD0FdyQOYHDEY9aVm5A,5546 +sympy/discrete/transforms.py,sha256=lf-n6IN881uCfTUAxPNjdUaSguiRbYW0omuR96vKNlE,11681 +sympy/external/__init__.py,sha256=C6s4654Elc_X-D9UgI2cUQWiQyGDt9LG3IKUc8qqzuo,578 +sympy/external/__pycache__/__init__.cpython-310.pyc,, +sympy/external/__pycache__/gmpy.cpython-310.pyc,, +sympy/external/__pycache__/importtools.cpython-310.pyc,, +sympy/external/__pycache__/pythonmpq.cpython-310.pyc,, +sympy/external/gmpy.py,sha256=V3Z0HQyg7SOgviwOvBik8dUtSxO6yiNqFqjARnjTO3I,2982 +sympy/external/importtools.py,sha256=Q7tS2cdGZ9a4NI_1sgGuoVcSDv_rIk-Av0BpFTa6EzA,7671 +sympy/external/pythonmpq.py,sha256=WOMTvHxYLXNp_vQ1F3jE_haeRlnGicbRlCTOp4ZNuo8,11243 +sympy/external/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/external/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/external/tests/__pycache__/test_autowrap.cpython-310.pyc,, +sympy/external/tests/__pycache__/test_codegen.cpython-310.pyc,, +sympy/external/tests/__pycache__/test_importtools.cpython-310.pyc,, +sympy/external/tests/__pycache__/test_numpy.cpython-310.pyc,, 
+sympy/external/tests/__pycache__/test_pythonmpq.cpython-310.pyc,, +sympy/external/tests/__pycache__/test_scipy.cpython-310.pyc,, +sympy/external/tests/test_autowrap.py,sha256=tRDOkHdndNTmsa9sGjlZ1lFIh1rL2Awck4ec1iolb7c,9755 +sympy/external/tests/test_codegen.py,sha256=zOgdevzcR5pK73FnXe3Su_2D6cuvrkP2FMqsro83G-c,12676 +sympy/external/tests/test_importtools.py,sha256=KrfontKYv11UvpazQ0vS1qyhxIvgZrCOXh1JFeACjeo,1394 +sympy/external/tests/test_numpy.py,sha256=7-YWZ--nbVX0h_rzah18AEjiz7JyvEzjHtklhwaAGhI,10123 +sympy/external/tests/test_pythonmpq.py,sha256=L_FdZmmk5N-VEivE_O_qZa98BZhT1WSxRfdmG817bA0,5797 +sympy/external/tests/test_scipy.py,sha256=CVaw7D0-6DORgg78Q6b35SNKn05PlKwWJuqXOuU-qdY,1172 +sympy/functions/__init__.py,sha256=fxnbVbZruEHXQxB5DaQTC6k1Qi8BrWaQ3LwBuSZZryk,5229 +sympy/functions/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/combinatorial/__init__.py,sha256=WqXI3qU_TTJ7nJA8m3Z-7ZAYKoApT8f9Xs0u2bTwy_c,53 +sympy/functions/combinatorial/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/combinatorial/__pycache__/factorials.cpython-310.pyc,, +sympy/functions/combinatorial/__pycache__/numbers.cpython-310.pyc,, +sympy/functions/combinatorial/factorials.py,sha256=OkQ_U2FhDCU0wnpLWyK4f6HMup-EAxh1fsQns74hYjE,37546 +sympy/functions/combinatorial/numbers.py,sha256=iXGk2kGB866puhbfk49KfFogYW8lUVTk_tm_nQw_gg4,83429 +sympy/functions/combinatorial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/functions/combinatorial/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/combinatorial/tests/__pycache__/test_comb_factorials.cpython-310.pyc,, +sympy/functions/combinatorial/tests/__pycache__/test_comb_numbers.cpython-310.pyc,, +sympy/functions/combinatorial/tests/test_comb_factorials.py,sha256=aM7qyHno3THToCxy2HMo1SJlINm4Pj7SjoLtALl6DJ0,26176 +sympy/functions/combinatorial/tests/test_comb_numbers.py,sha256=COdo810q8vjVyHiOYsgD5TcAE4G3bQUzQXlEroDWsj0,34317 +sympy/functions/elementary/__init__.py,sha256=Fj8p5qE-Rr1lqAyHI0aSgC3RYX56O-gWwo6wu-eUQYA,50 +sympy/functions/elementary/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/_trigonometric_special.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/complexes.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/exponential.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/hyperbolic.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/integers.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/miscellaneous.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/piecewise.cpython-310.pyc,, +sympy/functions/elementary/__pycache__/trigonometric.cpython-310.pyc,, +sympy/functions/elementary/_trigonometric_special.py,sha256=PiQ1eg280vWAnSaMMw6RheEJI0oIiwYa4K_sHmUWEgc,7245 +sympy/functions/elementary/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/functions/elementary/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/elementary/benchmarks/__pycache__/bench_exp.cpython-310.pyc,, +sympy/functions/elementary/benchmarks/bench_exp.py,sha256=PFBYa9eMovH5XOFN5XTxWr1VDj1EBoKwn4mAtj-_DdM,185 +sympy/functions/elementary/complexes.py,sha256=wwyEdwEaTyps_ZPEA667W7b_VLdYwaZ2cdE2vd5d5NI,43263 +sympy/functions/elementary/exponential.py,sha256=UrXHbvLi3r-uxLw_XYWiEUAnWVF5agcgDDkqWyA_r5Q,42694 +sympy/functions/elementary/hyperbolic.py,sha256=YEnCb_IbSgyUxicldCV61qCcPTrPt-eTexR_c6LRpv8,66628 +sympy/functions/elementary/integers.py,sha256=hM3NvuUHfTH-V8tGHc2ocOwGyXhsLe1gWO_8KJGw0So,19074 
+sympy/functions/elementary/miscellaneous.py,sha256=TAIoqthhfqx_wlcNbDdDHpLQrosWxX_nGy48BJk3R_w,27933 +sympy/functions/elementary/piecewise.py,sha256=o8y2TUKcn9varebhrcZSQQg-DOqjJHR2aP02CohgDEo,57858 +sympy/functions/elementary/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/functions/elementary/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_complexes.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_exponential.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_hyperbolic.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_integers.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_interface.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_miscellaneous.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_piecewise.cpython-310.pyc,, +sympy/functions/elementary/tests/__pycache__/test_trigonometric.cpython-310.pyc,, +sympy/functions/elementary/tests/test_complexes.py,sha256=nUSm7w9s2H_F1g8FB841ZoL0skV95PGV5w4_x8Ygh3Q,33513 +sympy/functions/elementary/tests/test_exponential.py,sha256=r8pqvffIEsu8K8VKeXCSsH4IXUJKzDa2wdx-pClsdmk,29566 +sympy/functions/elementary/tests/test_hyperbolic.py,sha256=gz7Is98WR0hCrZwDkocpi2CYWn6FqX11OzGCtpzvbZI,53361 +sympy/functions/elementary/tests/test_integers.py,sha256=g7FE4C8d8BuyZApycbQbq5uPs81eyR_4YdwP6A2P1Gc,20930 +sympy/functions/elementary/tests/test_interface.py,sha256=dBHnagyfDEXsQWlxVzWpqgCBdiJM0oUIv2QONbEYo9s,2054 +sympy/functions/elementary/tests/test_miscellaneous.py,sha256=eCL30UmsusBhjvqICQNmToa1aJTML8fXav1L1J6b7FU,17148 +sympy/functions/elementary/tests/test_piecewise.py,sha256=OOSlqsR7ZZG7drmSO7v5PlrPcbrqpv7sEt6h8pLNYyU,61520 +sympy/functions/elementary/tests/test_trigonometric.py,sha256=xsf5N30ILb_mdpx6Cb5E0o1QY5V4impDX2wqANJnXBE,86394 +sympy/functions/elementary/trigonometric.py,sha256=gnerAnDl9qfqxzvhMr2E5tRdq1GiBfdut6OLxRwuwTc,113966 +sympy/functions/special/__init__.py,sha256=5pjIq_RVCMsuCe1b-FlwIty30KxoUowZYKLmpIT9KHQ,59 +sympy/functions/special/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/special/__pycache__/bessel.cpython-310.pyc,, +sympy/functions/special/__pycache__/beta_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/bsplines.cpython-310.pyc,, +sympy/functions/special/__pycache__/delta_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/elliptic_integrals.cpython-310.pyc,, +sympy/functions/special/__pycache__/error_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/gamma_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/hyper.cpython-310.pyc,, +sympy/functions/special/__pycache__/mathieu_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/polynomials.cpython-310.pyc,, +sympy/functions/special/__pycache__/singularity_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/spherical_harmonics.cpython-310.pyc,, +sympy/functions/special/__pycache__/tensor_functions.cpython-310.pyc,, +sympy/functions/special/__pycache__/zeta_functions.cpython-310.pyc,, +sympy/functions/special/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/functions/special/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/special/benchmarks/__pycache__/bench_special.cpython-310.pyc,, +sympy/functions/special/benchmarks/bench_special.py,sha256=wzAoKTccuEaG4xrEYTlYfIJuLi3kUTMTEJ9iA113Wog,164 
+sympy/functions/special/bessel.py,sha256=3q5Ti0vVqSPQZ9oSZovJNAviFWuOXLUMbJvpRkdTxWs,63415 +sympy/functions/special/beta_functions.py,sha256=NXwFSRAtpoVkSybCUqicQDKqc8SNBeq3SOB1QS-Ge84,12603 +sympy/functions/special/bsplines.py,sha256=GxW_6tXuiuWap-pc4T0v1PMcfw8FXaq3mSEf50OkLoU,10152 +sympy/functions/special/delta_functions.py,sha256=NPneFMqLdwwMGZweS5C-Bok6ch1roYyO481ZNOiWp8I,19866 +sympy/functions/special/elliptic_integrals.py,sha256=rn4asENf-mFTc-iTpMOht-E-q_-vmhNc0Bd4xMPGfOE,14694 +sympy/functions/special/error_functions.py,sha256=syaTdbOA7xJBtMuuDSFZsOerSc2-Z5pm77SQ7Qn_eCU,77081 +sympy/functions/special/gamma_functions.py,sha256=OjPRUlD9wXr0XfBhn3Ocbwpey7Qd0H1JPyHeZkevxSc,42596 +sympy/functions/special/hyper.py,sha256=aby7IOWh0OtlCclHWv0cz3-cqKvuSIVHvQ8qFgOtQs8,37290 +sympy/functions/special/mathieu_functions.py,sha256=-3EsPJHwU1upnYz5rsc1Zy43aPpjXD1Nnmn2yA9LS6U,6606 +sympy/functions/special/polynomials.py,sha256=PBrr6UpHvs_FtYsTD_y2jre2tYNcqneOGwkm1omY2jk,46718 +sympy/functions/special/singularity_functions.py,sha256=5yDHvwQN16YS0L7C0kj34XI3o0q-_k4OgxIURo_9SZQ,7988 +sympy/functions/special/spherical_harmonics.py,sha256=Ivwi76IeFMZhukm_TnvJYT4QEqyW2DrGF5rj4_B-dJg,10997 +sympy/functions/special/tensor_functions.py,sha256=ZzMc93n_4Y4L-WVd9nmMh0nZQPYMB7uKqcnaFdupEXE,12277 +sympy/functions/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/functions/special/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_bessel.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_beta_functions.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_bsplines.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_delta_functions.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_elliptic_integrals.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_error_functions.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_gamma_functions.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_hyper.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_mathieu.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_singularity_functions.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_spec_polynomials.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_spherical_harmonics.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_tensor_functions.cpython-310.pyc,, +sympy/functions/special/tests/__pycache__/test_zeta_functions.cpython-310.pyc,, +sympy/functions/special/tests/test_bessel.py,sha256=Gx6cjelB0aXGDKMwG5O-wpPjyt6rFJVaNenNmD5Qb3E,34191 +sympy/functions/special/tests/test_beta_functions.py,sha256=yxfgu-wmNEeMfaFABiDHYmuZpZup9FTp0ZYerlc6hhc,3786 +sympy/functions/special/tests/test_bsplines.py,sha256=6UYg7IqXTi8fcSOut8TEzNVkxIA4ff-CyG22qJnbIYA,7145 +sympy/functions/special/tests/test_delta_functions.py,sha256=8xhSWG4SLL86z1QKFfLk_3b--bCrxjvCaxHlODBVToE,7138 +sympy/functions/special/tests/test_elliptic_integrals.py,sha256=AazZYMow9szbvC_WfK10c5j-LQRAzno6V1WJCbtp4MU,6860 +sympy/functions/special/tests/test_error_functions.py,sha256=0U78aiO9zvGOrqQ7tiVTUhqnpj0FDD9shNb-8AOhp68,31222 +sympy/functions/special/tests/test_gamma_functions.py,sha256=exHmFEtyZMJhVYTWFSBlMZhWdhQk6M2cjgNkvImD7o4,29910 +sympy/functions/special/tests/test_hyper.py,sha256=El56dyyIzJkyBV_1gH-bGX8iF6Jzn0EhpmJEK57gvKs,15990 
+sympy/functions/special/tests/test_mathieu.py,sha256=pqoFbnC84NDL6EQkigFtx5OQ1RFYppckTjzsm9XT0PY,1282 +sympy/functions/special/tests/test_singularity_functions.py,sha256=tqMJQIOOsBrveXctXPkPFIYdThG-wwKsjfdRHshEpfw,5467 +sympy/functions/special/tests/test_spec_polynomials.py,sha256=wuiZaR_LwaM8SlNuGl3B1p4eOHC_-zZVSXMPNfzKRB4,19561 +sympy/functions/special/tests/test_spherical_harmonics.py,sha256=pUFtFpNPBnJTdnqou0jniSchijyh1rdzKv8H24RT9FU,3850 +sympy/functions/special/tests/test_tensor_functions.py,sha256=bblSDkPABZ6N1j1Rb2Bb5TZIzZoK1D8ks3fHizi69ZI,5546 +sympy/functions/special/tests/test_zeta_functions.py,sha256=2r59_aC0QOXQsBNXqxsHPr2PkJExusI6qvSydZBPbfw,10474 +sympy/functions/special/zeta_functions.py,sha256=IdshdejjEv60nNZ4gQOVG0RIgxyo22psmglxZnzwHHw,24064 +sympy/galgebra.py,sha256=yEosUPSnhLp9a1NWXvpCLoU20J6TQ58XNIvw07POkVk,123 +sympy/geometry/__init__.py,sha256=BU2MiKm8qJyZJ_hz1qC-3nFJTPEcuvx4hYd02jHjqSM,1240 +sympy/geometry/__pycache__/__init__.cpython-310.pyc,, +sympy/geometry/__pycache__/curve.cpython-310.pyc,, +sympy/geometry/__pycache__/ellipse.cpython-310.pyc,, +sympy/geometry/__pycache__/entity.cpython-310.pyc,, +sympy/geometry/__pycache__/exceptions.cpython-310.pyc,, +sympy/geometry/__pycache__/line.cpython-310.pyc,, +sympy/geometry/__pycache__/parabola.cpython-310.pyc,, +sympy/geometry/__pycache__/plane.cpython-310.pyc,, +sympy/geometry/__pycache__/point.cpython-310.pyc,, +sympy/geometry/__pycache__/polygon.cpython-310.pyc,, +sympy/geometry/__pycache__/util.cpython-310.pyc,, +sympy/geometry/curve.py,sha256=F7b6XrlhUZ0QWLDoZJVojWfC5LeyOU-69OTFnYAREg8,10170 +sympy/geometry/ellipse.py,sha256=MMuWG_YOUngfW5137yu6iAOugjRxehrfkgidvD1J6RM,50851 +sympy/geometry/entity.py,sha256=fvHhtSb6RvE6v-8yMyCNvm0ekLPoO7EO9J8TEsGyQGU,20668 +sympy/geometry/exceptions.py,sha256=XtUMA44UTdrBWt771jegFC-TXsobhDiI-10TDH_WNFM,131 +sympy/geometry/line.py,sha256=JSc0dcjKV2m1R6b7tIaPjffhdGz3ZdtjFKvsH72Luqo,78343 +sympy/geometry/parabola.py,sha256=JalFtxCzBR8oE09agrzDtpGI9hrP4GJ-4zkg2r8Yj94,10707 +sympy/geometry/plane.py,sha256=A-CgWLjFC9k_OjyqJFaq7kDAdsSqmYET4aZl_eH2U10,26928 +sympy/geometry/point.py,sha256=8DtGkhQUyleVIi5WfptZOEk2zn0kwVAZv5aeNI498tg,36652 +sympy/geometry/polygon.py,sha256=hI1bRJdjCgsSKlPejO69z65LKO9iakcHx9ftJfSSLFA,81664 +sympy/geometry/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/geometry/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_curve.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_ellipse.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_entity.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_geometrysets.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_line.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_parabola.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_plane.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_point.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_polygon.cpython-310.pyc,, +sympy/geometry/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/geometry/tests/test_curve.py,sha256=xL4uRWAal4mXZxuQhcs9QOhs6MheCbFNyH1asq_a2IQ,4479 +sympy/geometry/tests/test_ellipse.py,sha256=oe9Bvye-kLjdhP3bwJPB0N1-wDL3cmVwYLhEhrGAPHk,25735 +sympy/geometry/tests/test_entity.py,sha256=0pBKdmRIETq0pJYjxRj34B0j-o56f4iqzJy9J4buU7U,3897 +sympy/geometry/tests/test_geometrysets.py,sha256=vvOWrFrJuNAFgbrVh1wPY94o-H-85FWlnIyyo2Kst9c,1911 +sympy/geometry/tests/test_line.py,sha256=D2yAOzCt80dmd7hP_l2A7aaWS8Mtw7RCkqA99L7McXI,37421 
+sympy/geometry/tests/test_parabola.py,sha256=kd0RU5sGOcfp6jgwgXMtvT2B6kG1-M3-iGOLnUJfZOw,6150 +sympy/geometry/tests/test_plane.py,sha256=QRcfoDsJtCtcvjFb18hBEHupycLgAT2OohF6GpNShyQ,12525 +sympy/geometry/tests/test_point.py,sha256=YO67zimsEVO07KGyLJVTVWa9795faGXJoFFcd2K4azc,16412 +sympy/geometry/tests/test_polygon.py,sha256=79iBkQjpX-CdO1mtMaX3lGvVfkopBiFhLC3QfWCreWA,27138 +sympy/geometry/tests/test_util.py,sha256=-LXPTiibkSQ0TO7ia6a-NYfMm2OJxw15Er7tr99dTVU,6204 +sympy/geometry/util.py,sha256=ZMXFHU2sxVAvc4_ywomdJC67hHCU-EyJN2SzW5TB9Zw,20170 +sympy/holonomic/__init__.py,sha256=BgHIokaSOo3nwJlGO_caJHz37n6yoA8GeM9Xjn4zMpc,784 +sympy/holonomic/__pycache__/__init__.cpython-310.pyc,, +sympy/holonomic/__pycache__/holonomic.cpython-310.pyc,, +sympy/holonomic/__pycache__/holonomicerrors.cpython-310.pyc,, +sympy/holonomic/__pycache__/numerical.cpython-310.pyc,, +sympy/holonomic/__pycache__/recurrence.cpython-310.pyc,, +sympy/holonomic/holonomic.py,sha256=XxLDC4TG_6ddHMQ5yZNWNJFb6s7n5Tg09kbufyiwVVw,94849 +sympy/holonomic/holonomicerrors.py,sha256=qDyUoGbrRjPtVax4SeEEf_o6-264mASEZO_rZETXH5o,1193 +sympy/holonomic/numerical.py,sha256=m35A7jO54xMNgA4w5Edn1i_SHbXWBlpQTRLMR8GgbZE,2730 +sympy/holonomic/recurrence.py,sha256=JFgSOT3hu6d7Mh9sdqvSxC3RxlVlH_cygsXpsX97YMY,10987 +sympy/holonomic/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/holonomic/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/holonomic/tests/__pycache__/test_holonomic.cpython-310.pyc,, +sympy/holonomic/tests/__pycache__/test_recurrence.cpython-310.pyc,, +sympy/holonomic/tests/test_holonomic.py,sha256=MrN7GVk7_zFWwDSfIhtD3FgoFgmFGlTpjOnnIzdP010,34760 +sympy/holonomic/tests/test_recurrence.py,sha256=HEbA3yCnIw4IDFV1rb3GjmM4SCDDZL7aYRlD7PWuQFg,1056 +sympy/integrals/__init__.py,sha256=aZr2Qn6i-gvFGH_5Hl_SRn2-Bd9Sf4zQdwo9VGLSeNY,1844 +sympy/integrals/__pycache__/__init__.cpython-310.pyc,, +sympy/integrals/__pycache__/deltafunctions.cpython-310.pyc,, +sympy/integrals/__pycache__/heurisch.cpython-310.pyc,, +sympy/integrals/__pycache__/integrals.cpython-310.pyc,, +sympy/integrals/__pycache__/intpoly.cpython-310.pyc,, +sympy/integrals/__pycache__/laplace.cpython-310.pyc,, +sympy/integrals/__pycache__/manualintegrate.cpython-310.pyc,, +sympy/integrals/__pycache__/meijerint.cpython-310.pyc,, +sympy/integrals/__pycache__/meijerint_doc.cpython-310.pyc,, +sympy/integrals/__pycache__/prde.cpython-310.pyc,, +sympy/integrals/__pycache__/quadrature.cpython-310.pyc,, +sympy/integrals/__pycache__/rationaltools.cpython-310.pyc,, +sympy/integrals/__pycache__/rde.cpython-310.pyc,, +sympy/integrals/__pycache__/risch.cpython-310.pyc,, +sympy/integrals/__pycache__/singularityfunctions.cpython-310.pyc,, +sympy/integrals/__pycache__/transforms.cpython-310.pyc,, +sympy/integrals/__pycache__/trigonometry.cpython-310.pyc,, +sympy/integrals/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/integrals/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/integrals/benchmarks/__pycache__/bench_integrate.cpython-310.pyc,, +sympy/integrals/benchmarks/__pycache__/bench_trigintegrate.cpython-310.pyc,, +sympy/integrals/benchmarks/bench_integrate.py,sha256=vk6wAO1bqzFT9oW4qsW7nKGfc_gP0XaB5PMYKx5339Q,396 +sympy/integrals/benchmarks/bench_trigintegrate.py,sha256=8XU3uB3mcavigvzHQZA7H1sHI32zgT-9RkSnLa-Y3Vc,305 +sympy/integrals/deltafunctions.py,sha256=ysIQLdRBcG_YR-bVDoxt-sxEVU8TG77oSgM-J0gI0mE,7435 +sympy/integrals/heurisch.py,sha256=R3G0RXskAxXum4CyQ1AV1BNeVbcmvp_Ipg0mOcDFRPo,26296 
+sympy/integrals/integrals.py,sha256=bC0WtE12WsV7WFzmZrKzct2nAbHUdbq6dKytpY7ZtlY,64606 +sympy/integrals/intpoly.py,sha256=qs1fQrEMKbsXwgfkBDUpEZ9f7x65Bdua8KS2lLBtLv4,43274 +sympy/integrals/laplace.py,sha256=eL7HjKsSLAspdo8BswrYADs2wd2U-9YEkinSD5JVjow,63518 +sympy/integrals/manualintegrate.py,sha256=E7NaMsl02Hy2lHU8mPcxNSsCQnQjVNPJqDrMyEOkAKw,75469 +sympy/integrals/meijerint.py,sha256=Yf80w6COiqdrvYLyMwS1P2-SGsNR1B7cqCmaERhx76U,80746 +sympy/integrals/meijerint_doc.py,sha256=mGlIu2CLmOulSGiN7n7kQ9w2DTcQfExJPaf-ee6HXlY,1165 +sympy/integrals/prde.py,sha256=VL_JEu6Bqhl8wSML1UY9nilOjafhkjFenVGCVV1pVbc,52021 +sympy/integrals/quadrature.py,sha256=6Bg3JmlIjIduIfaGfNVcwNfSrgEiLOszcN8WPzsXNqE,17064 +sympy/integrals/rationaltools.py,sha256=1OMhRhMBQ7igw2_YX5WR4q69QB_H0zMtGFtUkcbVD3Q,10922 +sympy/integrals/rde.py,sha256=AuiPDqP2awC4UlWJrsfNCn1l3OAQuZl64WI-lE2M5Ds,27392 +sympy/integrals/risch.py,sha256=S9r1kKx6WoJHomPWgNL2KCe73GWS8jIJ0AZt95QwBFI,67674 +sympy/integrals/singularityfunctions.py,sha256=BegUcpUW96FY9f8Yn0jHjK0LjCkM28NnCVg5S9cTWwU,2227 +sympy/integrals/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/integrals/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_deltafunctions.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_failing_integrals.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_heurisch.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_integrals.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_intpoly.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_laplace.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_lineintegrals.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_manual.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_meijerint.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_prde.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_quadrature.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_rationaltools.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_rde.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_risch.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_singularityfunctions.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_transforms.cpython-310.pyc,, +sympy/integrals/tests/__pycache__/test_trigonometry.cpython-310.pyc,, +sympy/integrals/tests/test_deltafunctions.py,sha256=ivFjS-WlLQ4aMqjVS7ZzMChP2Mmw_JUPnwI9otiLnvs,3709 +sympy/integrals/tests/test_failing_integrals.py,sha256=hQJc23KfK0bUmbj4W3C04QdJ0K17_ghMVfTLuKjUBPc,7074 +sympy/integrals/tests/test_heurisch.py,sha256=r4RjbSRYScuzMXA_EjrxalO1T1G0i5ZsAmDQcrhFU3s,12468 +sympy/integrals/tests/test_integrals.py,sha256=jwaCvWJoW_5_CTkDDBJeRDtLCUHdYzyzs-f7GyJDaVc,77122 +sympy/integrals/tests/test_intpoly.py,sha256=NzGhkR2pUMfd8lIU2cFR9bFa0J89RzpHs3zDggAWtXo,37445 +sympy/integrals/tests/test_laplace.py,sha256=FQoGfwyNoIwqdVc5Nk_RcOIJU70EaW-ipmoQtq7nFLk,28893 +sympy/integrals/tests/test_lineintegrals.py,sha256=zcPJ2n7DYt9KsgAe38t0gq3ARApUlb-kBahLThuRcq8,450 +sympy/integrals/tests/test_manual.py,sha256=arqxMdxUJkFIoy98rOirOTIwj623wHx9NqoupZLqkU8,33231 +sympy/integrals/tests/test_meijerint.py,sha256=jglmmX-AtkvwJgqQafBOKdaygrm14QJ8H-NfheNpFME,32265 +sympy/integrals/tests/test_prde.py,sha256=2BZmEDasdx_3l64-9hioArysDj6Nl520GpQN2xnEE_A,16360 +sympy/integrals/tests/test_quadrature.py,sha256=iFMdqck36gkL-yksLflawIOYmw-0PzO2tFj_qdK6Hjg,19919 +sympy/integrals/tests/test_rationaltools.py,sha256=6sNOkkZmOvCAPTwXrdU6hehDFleXYyakheX2KQaUHWY,5299 
+sympy/integrals/tests/test_rde.py,sha256=4d3vJupa-hRN4yNDISY8IC3rSI_cZW5BbtxoZm14y-Y,9571 +sympy/integrals/tests/test_risch.py,sha256=HaWg0JnErdrNzNmVfyz2Zz4XAgZPVVpZPt6Map3sQ58,38630 +sympy/integrals/tests/test_singularityfunctions.py,sha256=CSrHie59_NjNZ9B2GaHzKPNsMzxm5Kh6GuxlYk8zTuI,1266 +sympy/integrals/tests/test_transforms.py,sha256=Of9XEpzwB0CGy722z41oOdUEbfmAscsAhMute2_8oeA,27077 +sympy/integrals/tests/test_trigonometry.py,sha256=moMYr_Prc7gaYPjBK0McLjRpTEes2veUlN0vGv9UyEA,3869 +sympy/integrals/transforms.py,sha256=R625sYSQkNC1s9MiFdk0JzROTmoYjhgBTxoFE5Pc3rQ,51636 +sympy/integrals/trigonometry.py,sha256=iOoBDGFDZx8PNbgL3XeZEd80I8ro0WAizNuC4P-u8x0,11083 +sympy/interactive/__init__.py,sha256=yokwEO2HF3eN2Xu65JSpUUsN4iYmPvvU4m_64f3Q33o,251 +sympy/interactive/__pycache__/__init__.cpython-310.pyc,, +sympy/interactive/__pycache__/printing.cpython-310.pyc,, +sympy/interactive/__pycache__/session.cpython-310.pyc,, +sympy/interactive/__pycache__/traversal.cpython-310.pyc,, +sympy/interactive/printing.py,sha256=j7iVj-AhX3qBrQibPKtDNTMToCGhF6UKTdpUO8ME5CM,22700 +sympy/interactive/session.py,sha256=sG546e0mAtT0OrFkYNVM7QGvkWrDhAQZ5E1hfx03iBQ,15329 +sympy/interactive/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/interactive/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/interactive/tests/__pycache__/test_interactive.cpython-310.pyc,, +sympy/interactive/tests/__pycache__/test_ipython.cpython-310.pyc,, +sympy/interactive/tests/test_interactive.py,sha256=Pbopy9lODrd_P46_xxlWxLwqPfG6_4J3CWWC4IqfDL4,485 +sympy/interactive/tests/test_ipython.py,sha256=iYNmuETjveHBVpOywyv_jStQWkFwf1GuEBjoZUVhxK4,11799 +sympy/interactive/traversal.py,sha256=XbccdO6msNAvrG6FFJl2n4XmIiRISnvda4QflfEPg7U,3189 +sympy/liealgebras/__init__.py,sha256=K8tw7JqG33_y6mYl1LTr8ZNtKH5L21BqkjCHfLhP4aA,79 +sympy/liealgebras/__pycache__/__init__.cpython-310.pyc,, +sympy/liealgebras/__pycache__/cartan_matrix.cpython-310.pyc,, +sympy/liealgebras/__pycache__/cartan_type.cpython-310.pyc,, +sympy/liealgebras/__pycache__/dynkin_diagram.cpython-310.pyc,, +sympy/liealgebras/__pycache__/root_system.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_a.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_b.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_c.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_d.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_e.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_f.cpython-310.pyc,, +sympy/liealgebras/__pycache__/type_g.cpython-310.pyc,, +sympy/liealgebras/__pycache__/weyl_group.cpython-310.pyc,, +sympy/liealgebras/cartan_matrix.py,sha256=yr2LoZi_Gxmu-EMKgFuPOPNMYPOsxucLAS6oRpSYi2U,524 +sympy/liealgebras/cartan_type.py,sha256=xLklg8Y5s40je6sXwmLmG9iyYi9YEk9KoxTSFz1GtdI,1790 +sympy/liealgebras/dynkin_diagram.py,sha256=ZzGuBGNOJ3lPDdJDs4n8hvGbz6wLhC5mwb8zFkDmyPw,535 +sympy/liealgebras/root_system.py,sha256=GwWc4iploE7ogS9LTOkkjsij1mbPMQxbV2_pvNriYbE,6727 +sympy/liealgebras/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/liealgebras/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_cartan_matrix.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_cartan_type.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_dynkin_diagram.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_root_system.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_type_A.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_type_B.cpython-310.pyc,, 
+sympy/liealgebras/tests/__pycache__/test_type_C.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_type_D.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_type_E.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_type_F.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_type_G.cpython-310.pyc,, +sympy/liealgebras/tests/__pycache__/test_weyl_group.cpython-310.pyc,, +sympy/liealgebras/tests/test_cartan_matrix.py,sha256=KCsakn0fHKHRbIUcrUkHBIKkudl3_ISUdHrfJy-UOd4,303 +sympy/liealgebras/tests/test_cartan_type.py,sha256=t5PvYYDXbNIFL3CV59Je7SBIAeLLf-W3mOINPUoHK6E,339 +sympy/liealgebras/tests/test_dynkin_diagram.py,sha256=DSixbnt_yd0zrhKzXW_XqkXWXYe1Dk2MmXN-Rjb1dGg,260 +sympy/liealgebras/tests/test_root_system.py,sha256=YmGBdUeJ4PkLSfAfRgTF7GW62RCEd5nH27FSX9UaG5Q,927 +sympy/liealgebras/tests/test_type_A.py,sha256=x7QmpjxsGmXol-IYVtN1lmIOmM3HLYwpX1tSG5h6FMM,657 +sympy/liealgebras/tests/test_type_B.py,sha256=Gw0GP24wP2rPn38Wwla9W7BwWH4JtCGpaprZb5W6JVY,642 +sympy/liealgebras/tests/test_type_C.py,sha256=ysSy-vzE9lNwzAunrmvnFkLBoJwF7W2On7QpqS6RI1s,927 +sympy/liealgebras/tests/test_type_D.py,sha256=qrO4oCjrjkp1uDvrNtbgANVyaOExqOLNtIpIxD1uH0U,764 +sympy/liealgebras/tests/test_type_E.py,sha256=suG6DaZ2R74ovnJrY6GGyiu9A6FjUkouRNUFPnEczqk,775 +sympy/liealgebras/tests/test_type_F.py,sha256=yUQJ7LzTemv4Cd1XW_dr3x7KEI07BahsWAyJfXLS1eA,1378 +sympy/liealgebras/tests/test_type_G.py,sha256=wVa6qcAHbdrc9dA63samexHL35cWWJS606pom-6mH2Q,548 +sympy/liealgebras/tests/test_weyl_group.py,sha256=HrzojRECbhNUsdLFQAXYnJEt8LfktOSJZuqVE45aRnc,1501 +sympy/liealgebras/type_a.py,sha256=l5SUJknj1xLgwRVMuOsVmwbcxY2V6PU59jBtssylKH4,4314 +sympy/liealgebras/type_b.py,sha256=50xdcrec1nFFtyUWOmP2Qm9ZW1zpbrgwbz_YPKp55Go,4563 +sympy/liealgebras/type_c.py,sha256=bXGqPiLN3x4NAsM-ZHKJPxFO6RY7lDZUckCarIODEi0,4439 +sympy/liealgebras/type_d.py,sha256=Rgh7KpI5FQnDai6KVfoz_TREYaKxqvINDXu6Zdu-7EQ,4694 +sympy/liealgebras/type_e.py,sha256=Uf-QzI-6bRJeI91stGHsiesknwBEVYIjZaiNP-2bIiY,9780 +sympy/liealgebras/type_f.py,sha256=boKDhOxRcAWDBHsEYk4j14vUvT0mO3UkRq6QzqoPOes,4417 +sympy/liealgebras/type_g.py,sha256=Ife98dGPtarGd-ii8hJbXdB0SMsct4okDkSX2wLN8XI,2965 +sympy/liealgebras/weyl_group.py,sha256=5YFA8qC4GWDM0WLNR_6VgpuNFZDfyDA7fBFjBcZaLgA,14557 +sympy/logic/__init__.py,sha256=RfoXrq9MESnXdL7PkwpYEfWeaxH6wBPHiE4zCgLKvk0,456 +sympy/logic/__pycache__/__init__.cpython-310.pyc,, +sympy/logic/__pycache__/boolalg.cpython-310.pyc,, +sympy/logic/__pycache__/inference.cpython-310.pyc,, +sympy/logic/algorithms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/logic/algorithms/__pycache__/__init__.cpython-310.pyc,, +sympy/logic/algorithms/__pycache__/dpll.cpython-310.pyc,, +sympy/logic/algorithms/__pycache__/dpll2.cpython-310.pyc,, +sympy/logic/algorithms/__pycache__/minisat22_wrapper.cpython-310.pyc,, +sympy/logic/algorithms/__pycache__/pycosat_wrapper.cpython-310.pyc,, +sympy/logic/algorithms/dpll.py,sha256=zqiZDm1oD5sNxFqm_0Hen6NjfILIDp5uRgEOad1vYXI,9188 +sympy/logic/algorithms/dpll2.py,sha256=UbBxJjiUaqBbQPaivtrv3ZhNNuHHdUsJ5Us2vy8QmxA,20317 +sympy/logic/algorithms/minisat22_wrapper.py,sha256=uINcvkIHGWYJb8u-Q0OgnSgaHfVUd9tYYFbBAVNiASo,1317 +sympy/logic/algorithms/pycosat_wrapper.py,sha256=0vNFTbu9-YhSfjwYTsZsP_Z4HM8WpL11-xujLBS1kYg,1207 +sympy/logic/boolalg.py,sha256=-t3WrVge-B7WmoUF25BfOxK15rsC0tIfigdcCcgvbdQ,114180 +sympy/logic/inference.py,sha256=18eETh6ObPCteJJgrrtrkCK031ymDQdvQbveaUymCcM,8542 +sympy/logic/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+sympy/logic/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc,, +sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc,, +sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc,, +sympy/logic/tests/test_boolalg.py,sha256=L6hUEjRIhn2Dh65BDXifDrgXHuvBoATT89-6dYZHzgo,48838 +sympy/logic/tests/test_dimacs.py,sha256=EK_mA_k9zBLcQLTOKTZVrGhnGuQNza5mwXDQD_f-X1c,3886 +sympy/logic/tests/test_inference.py,sha256=DOlgb4clEULjMBp0cG3ZdCrXN8vFdxJZmSDf-13bWSA,13246 +sympy/logic/utilities/__init__.py,sha256=WTn2vBgHcmhONRWI79PdMYNk8UxYDzsxRlZWuc-wtNI,55 +sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc,, +sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc,, +sympy/logic/utilities/dimacs.py,sha256=aaHdXUOD8kZHWbTzuZc6c5xMM8O1oHbRxyOxPpVMMdQ,1663 +sympy/matrices/__init__.py,sha256=BUbgKPUXTwvrhDbQjjG6c3jFBwmQ0WfRiMQTTFnPL90,2611 +sympy/matrices/__pycache__/__init__.cpython-310.pyc,, +sympy/matrices/__pycache__/common.cpython-310.pyc,, +sympy/matrices/__pycache__/decompositions.cpython-310.pyc,, +sympy/matrices/__pycache__/dense.cpython-310.pyc,, +sympy/matrices/__pycache__/determinant.cpython-310.pyc,, +sympy/matrices/__pycache__/eigen.cpython-310.pyc,, +sympy/matrices/__pycache__/graph.cpython-310.pyc,, +sympy/matrices/__pycache__/immutable.cpython-310.pyc,, +sympy/matrices/__pycache__/inverse.cpython-310.pyc,, +sympy/matrices/__pycache__/matrices.cpython-310.pyc,, +sympy/matrices/__pycache__/normalforms.cpython-310.pyc,, +sympy/matrices/__pycache__/reductions.cpython-310.pyc,, +sympy/matrices/__pycache__/repmatrix.cpython-310.pyc,, +sympy/matrices/__pycache__/solvers.cpython-310.pyc,, +sympy/matrices/__pycache__/sparse.cpython-310.pyc,, +sympy/matrices/__pycache__/sparsetools.cpython-310.pyc,, +sympy/matrices/__pycache__/subspaces.cpython-310.pyc,, +sympy/matrices/__pycache__/utilities.cpython-310.pyc,, +sympy/matrices/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/matrices/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/matrices/benchmarks/__pycache__/bench_matrix.cpython-310.pyc,, +sympy/matrices/benchmarks/bench_matrix.py,sha256=vGMlg-2il2cFeAWrf0NJ6pzPX3Yd3ZQMxFgQ4q5ILQE,306 +sympy/matrices/common.py,sha256=LnBG-5vXn6c8Oe9C-Q4ziQvNyJSu5l_4DirQ-VZ2rfM,93370 +sympy/matrices/decompositions.py,sha256=MYLr-Qt5wZTDBrnVmBAudOM5QYIgkXWtLDA0coLWk50,48074 +sympy/matrices/dense.py,sha256=cTAq0K3GnLBiNkCgZNVr9rLt8H3rrnyhHaeLc_YTBok,30375 +sympy/matrices/determinant.py,sha256=IxURxqbmux4jXwkIXMm0cxJ3oygY6InrqkVo4ZnD-nk,30118 +sympy/matrices/eigen.py,sha256=7vgLspYAIVmiFtVJ9wNiVLKrQSTGhqLtPR_wqdX0WRc,39786 +sympy/matrices/expressions/__init__.py,sha256=IMqXCSsPh0Vp_MC9HZTudA5DGM4WBq_yB-Bst0azyM8,1692 +sympy/matrices/expressions/__pycache__/__init__.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/_shape.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/adjoint.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/applyfunc.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/blockmatrix.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/companion.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/determinant.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/diagonal.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/dotproduct.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/factorizations.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/fourier.cpython-310.pyc,, 
+sympy/matrices/expressions/__pycache__/funcmatrix.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/hadamard.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/inverse.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/kronecker.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/matadd.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/matexpr.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/matmul.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/matpow.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/permutation.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/sets.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/slice.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/special.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/trace.cpython-310.pyc,, +sympy/matrices/expressions/__pycache__/transpose.cpython-310.pyc,, +sympy/matrices/expressions/_shape.py,sha256=fgKRp_3LrDvFYBYz2M0BqTbjAlKLtx6Gpy9g78wHpVQ,3058 +sympy/matrices/expressions/adjoint.py,sha256=CbkYP2Hi9JVb7WO5HiCE14fwOn16fT3Le5HfV30cpCQ,1572 +sympy/matrices/expressions/applyfunc.py,sha256=wFgcMOp6uakZ6wkkF7mB7GwM35GS5SGzXz1LCeJbemE,6749 +sympy/matrices/expressions/blockmatrix.py,sha256=eKQ4GlVm4_6i2bah7T95qtJdXWLJJ28yry27ajGGfIo,31809 +sympy/matrices/expressions/companion.py,sha256=lXUJRbjQR6e1mdHQdJwNIJXMW80XmKbOVqNvUXjB57U,1705 +sympy/matrices/expressions/determinant.py,sha256=wmtIB5q1_cJpnHSSsQT2MjE6wJdDV1RtZudGOzDJmG4,3173 +sympy/matrices/expressions/diagonal.py,sha256=NtIFAfpoI_jhElfkJ6WCxc4r9iWN8VBOR3LLxKEzJsE,6326 +sympy/matrices/expressions/dotproduct.py,sha256=sKdUhwVKTB3LEvd8xMwCDexNoQ1Dz43DCYsmm3UwFWw,1911 +sympy/matrices/expressions/factorizations.py,sha256=zFNjMBsJqhsIcDD8Me4W8-Q-TV89WptfG3Dd9yK_tPE,1456 +sympy/matrices/expressions/fourier.py,sha256=dvaftgB9jgkR_8ETyhzyVLtf1ZJu_wQC-ZbpTYMXZGE,2094 +sympy/matrices/expressions/funcmatrix.py,sha256=q6R75wLn0UdV4xJdVJUrNaofV1k1egXLLQdBeZcPtiY,3520 +sympy/matrices/expressions/hadamard.py,sha256=S-vY0RFuV7Xyf6kBwgQiGXJnci7j5gpxN8nazW1IGwE,13918 +sympy/matrices/expressions/inverse.py,sha256=ZJSzuTgKz01zmb3dnmFKn6AmR6gXd_5zEYzHkk8cF2o,2732 +sympy/matrices/expressions/kronecker.py,sha256=_JPrC-FruT4N2Sgl4hQdjThjFFfHsHGTLubvU4m3uvU,13398 +sympy/matrices/expressions/matadd.py,sha256=LwznSmZRJQt_sDeq_lcXsUXlSyrcE8J-cwgvi9saUDg,4771 +sympy/matrices/expressions/matexpr.py,sha256=1pswXMAOjYk3YwUhPxCoax2lIZ1rQgnskPdlE1gWhHY,27471 +sympy/matrices/expressions/matmul.py,sha256=bewNxpEnQ0WaVzHzpVgfF_5VHdBLroewZbBAxJTvHgE,15586 +sympy/matrices/expressions/matpow.py,sha256=gF0cscUBvOuAzsGbzN6VgkMPSgz_2_3wShl67B6YGo8,4916 +sympy/matrices/expressions/permutation.py,sha256=gGIht-JI1zWyZz7VPvm5S1Ae2i-P0WUAJl3euLRXWtM,8046 +sympy/matrices/expressions/sets.py,sha256=KxGHZ-4p4nALQBj2f1clG43lB4qYu6M2P0zpubiH-ik,2001 +sympy/matrices/expressions/slice.py,sha256=aNdY1Ey4VJR-UCvoORX2kh2DmA6QjOp-waENvWg8WVE,3355 +sympy/matrices/expressions/special.py,sha256=UH0sOc_XhRHaW5ERyVVHtNTlmfHYiUdRmYzXjcSbCzE,7495 +sympy/matrices/expressions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/matrices/expressions/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_adjoint.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_applyfunc.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_blockmatrix.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_companion.cpython-310.pyc,, 
+sympy/matrices/expressions/tests/__pycache__/test_derivatives.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_determinant.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_diagonal.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_dotproduct.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_factorizations.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_fourier.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_funcmatrix.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_hadamard.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_indexing.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_inverse.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_kronecker.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_matadd.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_matexpr.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_matmul.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_matpow.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_permutation.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_sets.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_slice.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_special.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_trace.cpython-310.pyc,, +sympy/matrices/expressions/tests/__pycache__/test_transpose.cpython-310.pyc,, +sympy/matrices/expressions/tests/test_adjoint.py,sha256=cxOc334yNSI9MazhG9HT8s1OCXjkDWr3Zj2JnyHS3Z4,1065 +sympy/matrices/expressions/tests/test_applyfunc.py,sha256=mxTJaoB4Ze50lk-2TgVopmrrbuQbEqUsZwc3K1H8w-Q,3522 +sympy/matrices/expressions/tests/test_blockmatrix.py,sha256=EHJWm2dniNmf1CfODQSPm_HCCV77Ia0FbeNigsYJXZY,15695 +sympy/matrices/expressions/tests/test_companion.py,sha256=Lam6r-cSOokjhSlJws55Kq-gL5_pHfeV_Xuvmn5PkRU,1657 +sympy/matrices/expressions/tests/test_derivatives.py,sha256=9mBeaAZDX7-JbYs6tMClNuGDygETVN_dCXSlHmyAhwg,15991 +sympy/matrices/expressions/tests/test_determinant.py,sha256=QutUKtr35GCZ4iS2H1WTzMwa0jAvL0prcS82Untgr5k,1989 +sympy/matrices/expressions/tests/test_diagonal.py,sha256=3L6Vs_Yr36a8dgIqAeIcNEf0xcVyeyGhANNu0dlIpwI,4516 +sympy/matrices/expressions/tests/test_dotproduct.py,sha256=Zkv2N6oRPm0-sN4PFwsVFrM5Y_qv4x2gWqQQQD86hBY,1171 +sympy/matrices/expressions/tests/test_factorizations.py,sha256=6UPA_UhCL5JPbaQCOatMnxhGnQ-aIHmb3lXqbwrSoIE,786 +sympy/matrices/expressions/tests/test_fourier.py,sha256=0eD69faoHXBcuQ7g2Q31fqs-gyR_Xfe-gv-7DXhJh_c,1638 +sympy/matrices/expressions/tests/test_funcmatrix.py,sha256=zmOEcXHCK2MziwVBJb7iq9Q-Lbl4bbCQ_RAk27c7qUU,2381 +sympy/matrices/expressions/tests/test_hadamard.py,sha256=WDelP7lQ9KqsalOOlWHaZq38nTijkRUMAXMcAvU42SM,4610 +sympy/matrices/expressions/tests/test_indexing.py,sha256=wwYQa7LNlzhBA5fU50gPyE8cqaJf0s3O70PUx4eNCEA,12038 +sympy/matrices/expressions/tests/test_inverse.py,sha256=33Ui_vXZBJR1gMirb8c5xHDnx2jpVjWoVpYmVuZQoJg,2060 +sympy/matrices/expressions/tests/test_kronecker.py,sha256=e5H6av3ioOn8jkjyDBrT3NEmCkyHbN6ZEHOlyB9OYLk,5366 +sympy/matrices/expressions/tests/test_matadd.py,sha256=DkK_RuIFA9H9HoWcegtPWRHfQNg17h5CfqUD26E8u8E,1862 +sympy/matrices/expressions/tests/test_matexpr.py,sha256=lBuqWCwSevU7JL66eoHWrxL5gIvaWmkminDoqFmpyKA,17409 
+sympy/matrices/expressions/tests/test_matmul.py,sha256=MuMIzP-ouiuRuTU5PmBtU-Xk_0Btu4mym-C20M8lN58,5963 +sympy/matrices/expressions/tests/test_matpow.py,sha256=3tRbEmZi2gZTmkBm7mAWUDbX4jwEfC8tC4kYoOuzaUg,7304 +sympy/matrices/expressions/tests/test_permutation.py,sha256=93Cqjj2k3aoR3ayMJLdJUa5h1u87bRRxT3I8B4FQsvU,5607 +sympy/matrices/expressions/tests/test_sets.py,sha256=x60NRXGjxS_AE37jGFAOvZdKlWW5m4X0C3OzIukftAM,1410 +sympy/matrices/expressions/tests/test_slice.py,sha256=C7OGAQQTz0YZxZCa7g0m8_0Bqq8jaPRa22JHVSqK7tY,2027 +sympy/matrices/expressions/tests/test_special.py,sha256=Mhg71vnjjb4fm0jZgjDoWW8rAJMBeh8aDCM75gjEpKQ,6496 +sympy/matrices/expressions/tests/test_trace.py,sha256=fRlrw9CfdO3z3SI4TQb1fCUb_zVAndbtyOErEeCTCQ0,3383 +sympy/matrices/expressions/tests/test_transpose.py,sha256=P3wPPRywKnrAppX6gssgD66v0RIcolxqDkCaKGGPVcM,1987 +sympy/matrices/expressions/trace.py,sha256=Iqg3wgO7tTTVZGo1qbXKn99qTss-5znAW6-lLrhuIIs,5348 +sympy/matrices/expressions/transpose.py,sha256=SnfU_CE3_dBQkbi_SkPGqsE8eDgstYuplx7XDxKJIyA,2691 +sympy/matrices/graph.py,sha256=O73INKAbTpnzNdZ7y08ow9U2CmApdn7S9NEsA9LR-XQ,9076 +sympy/matrices/immutable.py,sha256=3NWY8oHiTGdWQR6AfZpg2fOtjRc1KH75yxkITNzCcPg,5425 +sympy/matrices/inverse.py,sha256=pGDQ3-iG9oTMEIuCwrFe0X5lxkvZSF-iMzod8zTv1OA,11409 +sympy/matrices/matrices.py,sha256=thx6Ks7DAts1FUB3l3cu4s3HRJ952mGNlXstLVvR4jM,75508 +sympy/matrices/normalforms.py,sha256=KiiKxxnYEaoA75UJjYFGqVLipgraNlG3Dlh9E2c1Q7k,3808 +sympy/matrices/reductions.py,sha256=GmXqmi3mgxi-jUiSx-B8xN0M7qLLovdDDTzjoMZvQR0,10781 +sympy/matrices/repmatrix.py,sha256=JIt55DuimIz7xN0WjdPzZhQmYbaqnDOT5xCRowPR2pY,21962 +sympy/matrices/solvers.py,sha256=IDDTmTY9FTZsbTwPC4oVG_0ZV8v6ey0JbhCFHulNm2E,22764 +sympy/matrices/sparse.py,sha256=KFRkfQ6iyLekYMc-0VJffNKzf7EeFvIk2zRsFoQwwcI,14675 +sympy/matrices/sparsetools.py,sha256=tzI541P8QW_v1eVJAXgOlo_KK1Xp6u1geawX_tdlBxY,9182 +sympy/matrices/subspaces.py,sha256=uLo4qnP0xvFcFo5hhf6g7pHSHiRbcQ1ATDKwGBxW7CE,3761 +sympy/matrices/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/matrices/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_commonmatrix.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_decompositions.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_determinant.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_eigen.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_graph.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_immutable.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_interactions.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_matrices.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_normalforms.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_reductions.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_solvers.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_sparse.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_sparsetools.cpython-310.pyc,, +sympy/matrices/tests/__pycache__/test_subspaces.cpython-310.pyc,, +sympy/matrices/tests/test_commonmatrix.py,sha256=9xvYBhxFJm020OhVDKWIj-m1PGtkvHFwtV7iL67SdUI,38564 +sympy/matrices/tests/test_decompositions.py,sha256=SvjGIKZawYyotzbbwpwpcC7fV-nZRNlDwRhq1AL2AQ0,14417 +sympy/matrices/tests/test_determinant.py,sha256=RYmf2bLWtk8nuyIJuhRpSIFklsfVtAGa2gx2AvAi2TU,13350 +sympy/matrices/tests/test_eigen.py,sha256=guJ56Hd33ScYp2DPLMQ-mj6WtG7JbRB5pvJLv6SeP-0,22773 +sympy/matrices/tests/test_graph.py,sha256=ckfGDCg2M6gluv9XFnfURga8gxd2HTL7aX281s6wy6c,3213 
+sympy/matrices/tests/test_immutable.py,sha256=qV1L1i8RWX3ihJx3J-M07s_thfXmuUA1wIRfQnUbqyA,4618 +sympy/matrices/tests/test_interactions.py,sha256=RKQsDDiwuEZxL7-bTJR_ue7DKGbCZYl7pvjjgE7EyEY,2066 +sympy/matrices/tests/test_matrices.py,sha256=WHL_ngSJgL_R4CBPACf4GPfand2bOGvVhjHcjJyFCY4,144201 +sympy/matrices/tests/test_normalforms.py,sha256=JQvFfp53MW8cJhxEkyNvsMmhhD7FVncAkjuGMXu5Fok,3009 +sympy/matrices/tests/test_reductions.py,sha256=xbB-_vbF9IYIzvkaOjsVeFfJHRk3buFRNdxKGZvuZXE,13951 +sympy/matrices/tests/test_solvers.py,sha256=hsbvtRyBhLzTxX62AYqDTn7bltGanT1NwYUecUPEViE,20386 +sympy/matrices/tests/test_sparse.py,sha256=GvXN6kBVldjqoR8WN8I_PjblKhRmyRWvVuLUgZEgugY,23281 +sympy/matrices/tests/test_sparsetools.py,sha256=pjQR6UaEMR92NolB_IGZ9Umk6FPZjvI0vk1Fd4H_C5I,4877 +sympy/matrices/tests/test_subspaces.py,sha256=vnuIyKbViZMa-AHCZ3PI9HbCL_t-LNI70gwbZvzRtzw,3839 +sympy/matrices/utilities.py,sha256=mMnNsDTxGKqiG0JATsM4W9b5jglhacy-vmRw2aZojgY,2117 +sympy/multipledispatch/__init__.py,sha256=aV2NC2cO_KmD6QFiwy4oC1D8fm3pFuPbaiTMeWmNWak,259 +sympy/multipledispatch/__pycache__/__init__.cpython-310.pyc,, +sympy/multipledispatch/__pycache__/conflict.cpython-310.pyc,, +sympy/multipledispatch/__pycache__/core.cpython-310.pyc,, +sympy/multipledispatch/__pycache__/dispatcher.cpython-310.pyc,, +sympy/multipledispatch/__pycache__/utils.cpython-310.pyc,, +sympy/multipledispatch/conflict.py,sha256=rR6tKn58MfhMMKZ4ZrhVduylXd9f5PjT2TpzM9LMB6o,2117 +sympy/multipledispatch/core.py,sha256=I4WOnmu1VtlaCnn2oD9R2-xckkYLRZPNFEWtCOTAYfM,2261 +sympy/multipledispatch/dispatcher.py,sha256=A2I4upt4qNollXGpwzrqg7M0oKHJhZx1BUMIBnjRIow,12226 +sympy/multipledispatch/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/multipledispatch/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/multipledispatch/tests/__pycache__/test_conflict.cpython-310.pyc,, +sympy/multipledispatch/tests/__pycache__/test_core.cpython-310.pyc,, +sympy/multipledispatch/tests/__pycache__/test_dispatcher.cpython-310.pyc,, +sympy/multipledispatch/tests/test_conflict.py,sha256=msNVSiikuPOqsEm_MMGmjsNbA2CAR0F1FZaHskzzo04,1786 +sympy/multipledispatch/tests/test_core.py,sha256=UfH_7cyvZ6PHjdH8vmLG49CG7E30W8uxm3FthuMc1Jk,4048 +sympy/multipledispatch/tests/test_dispatcher.py,sha256=saJPpGXLpLOuRfw-ekzZGzY-Rys0NsS5ke0n33i9j0U,6228 +sympy/multipledispatch/utils.py,sha256=39wB9i8jNhlLFZyCTFnioLx5N_CNWv4r5VZwKrxswIE,3097 +sympy/ntheory/__init__.py,sha256=MBs5Tdw5xAgNMlCdN8fSLiIswQudZibIbHjI9L5BEds,2746 +sympy/ntheory/__pycache__/__init__.cpython-310.pyc,, +sympy/ntheory/__pycache__/bbp_pi.cpython-310.pyc,, +sympy/ntheory/__pycache__/continued_fraction.cpython-310.pyc,, +sympy/ntheory/__pycache__/digits.cpython-310.pyc,, +sympy/ntheory/__pycache__/ecm.cpython-310.pyc,, +sympy/ntheory/__pycache__/egyptian_fraction.cpython-310.pyc,, +sympy/ntheory/__pycache__/elliptic_curve.cpython-310.pyc,, +sympy/ntheory/__pycache__/factor_.cpython-310.pyc,, +sympy/ntheory/__pycache__/generate.cpython-310.pyc,, +sympy/ntheory/__pycache__/modular.cpython-310.pyc,, +sympy/ntheory/__pycache__/multinomial.cpython-310.pyc,, +sympy/ntheory/__pycache__/partitions_.cpython-310.pyc,, +sympy/ntheory/__pycache__/primetest.cpython-310.pyc,, +sympy/ntheory/__pycache__/qs.cpython-310.pyc,, +sympy/ntheory/__pycache__/residue_ntheory.cpython-310.pyc,, +sympy/ntheory/bbp_pi.py,sha256=p4OLH6B7CFmpTQPM2DNvxWW3T-PYNha5EPAE649i_tA,5252 +sympy/ntheory/continued_fraction.py,sha256=-GA1fzvgK7h8Bad_1NN0majRhwIQEg2zZDPuKSHAVYA,10109 
+sympy/ntheory/digits.py,sha256=xFzoMyAC36fLR5OvtTetoXUSvhNTbP3HKY_co8RUEr4,3688 +sympy/ntheory/ecm.py,sha256=3ot2F6V8TSsaFEZndxxDDyqnT0jQ67Xdq0e3cuea_UE,10618 +sympy/ntheory/egyptian_fraction.py,sha256=hW886hPWJtARqgZIrH1WjZFC0uvf9CHxMIn0X9MWZro,6923 +sympy/ntheory/elliptic_curve.py,sha256=zDRjICf4p3PPfdxKWrPeTcMbAMqPvrZmK2rk9JAbh60,11510 +sympy/ntheory/factor_.py,sha256=5Oqd9QvsW4MR_eH--wbpmoa502yhoLM4g-9gPh5eYKc,75815 +sympy/ntheory/generate.py,sha256=42BWhzsUNv2k3pqdzWyAHAPPydPIaxHkmTIV-8rVSAk,29411 +sympy/ntheory/modular.py,sha256=fA3_ovJcPqrwT2bPjmd4cSGPDyVG6HSM9oP07HP1R_s,7650 +sympy/ntheory/multinomial.py,sha256=rbm3STjgfRbNVbcPeH69qtWktthSCk0sC373NuDM6fU,5073 +sympy/ntheory/partitions_.py,sha256=mE-PQKxaEM20AJJiCgkfhuCAruPbrtnHq3Ad2WrBSM8,5975 +sympy/ntheory/primetest.py,sha256=2qI-5HR_CowK2iH07B4XE2anXxkhSDWw7PPcQkOy70g,20951 +sympy/ntheory/qs.py,sha256=QzIJFHjFG2ncIpoJ7CGMzJ6HudVqB2RNp2yBHBjkSz8,18474 +sympy/ntheory/residue_ntheory.py,sha256=qNJSoRFKAcAcRet5rv3nSF7p3BJJXk9ewJxIDdg1lSE,40653 +sympy/ntheory/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/ntheory/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_bbp_pi.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_continued_fraction.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_digits.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_ecm.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_egyptian_fraction.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_elliptic_curve.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_factor_.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_generate.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_modular.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_multinomial.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_partitions.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_primetest.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_qs.cpython-310.pyc,, +sympy/ntheory/tests/__pycache__/test_residue.cpython-310.pyc,, +sympy/ntheory/tests/test_bbp_pi.py,sha256=-RXXkqMUfVCYeO9HonldOOISDKDaUYCCe5CUgK18L3o,9433 +sympy/ntheory/tests/test_continued_fraction.py,sha256=gfQfLuLVFn-bmEPBcgnU-f0VibJiY8hAEl0FO4V3iVU,3052 +sympy/ntheory/tests/test_digits.py,sha256=jC8GCQVJelFcHMApf5TZU1KXP2oBp48lkkD0bM2TLCo,1182 +sympy/ntheory/tests/test_ecm.py,sha256=Hy9pYRZPuFm7yrGVRs2ob_w3YY3bMEENH_hkDh947UE,2303 +sympy/ntheory/tests/test_egyptian_fraction.py,sha256=tpHcwteuuQAahcPqvgBm4Mwq-efzcHOn8mldijynjlE,2378 +sympy/ntheory/tests/test_elliptic_curve.py,sha256=wc0EOsGo-qGpdevRq1o64htwTOT_YSUzUfyhJC-JVbg,624 +sympy/ntheory/tests/test_factor_.py,sha256=Z1RvrqLttbgp3ZhfJZtCZmUV7GehKGQDSUEEdF0CSSA,25024 +sympy/ntheory/tests/test_generate.py,sha256=ALKzLAcCPIMTr3JC6RJHuOYd6z0aFVaF5-e481icYe8,8069 +sympy/ntheory/tests/test_modular.py,sha256=g73sUXtYNxzbDcq5UnMWT8NodAU8unwRj_E-PpvJqDs,1425 +sympy/ntheory/tests/test_multinomial.py,sha256=8uuj6XlatNyIILOpjJap13CMZmDwrCyGKn9LiIUiLV0,2344 +sympy/ntheory/tests/test_partitions.py,sha256=AkmDpR0IFxo0ret91tRPYUqrgQfQ367okTt2Ee2Vm60,507 +sympy/ntheory/tests/test_primetest.py,sha256=1Pkoi-TNxvB0oT1J5_YXryabyiGgPeXigS_vo_4x_v8,7062 +sympy/ntheory/tests/test_qs.py,sha256=ZCWiWiUULzLDTCz6CsolmVAdvZMZrz3wFrZXd-GtHfM,4481 +sympy/ntheory/tests/test_residue.py,sha256=t3-yaWmZvfkQpjUDqOzgwnTFO0je7BkEU2QKpA-pttU,12884 +sympy/parsing/__init__.py,sha256=KHuyDeHY1ifpVxT4aTOhomazCBYVIrKWd28jqp6YNJ8,125 +sympy/parsing/__pycache__/__init__.cpython-310.pyc,, 
+sympy/parsing/__pycache__/ast_parser.cpython-310.pyc,, +sympy/parsing/__pycache__/mathematica.cpython-310.pyc,, +sympy/parsing/__pycache__/maxima.cpython-310.pyc,, +sympy/parsing/__pycache__/sym_expr.cpython-310.pyc,, +sympy/parsing/__pycache__/sympy_parser.cpython-310.pyc,, +sympy/parsing/ast_parser.py,sha256=PWuAoNPZ6-C8HCYYGCG9tMCgwuMzi_ebyIqFSJCqk6k,2724 +sympy/parsing/autolev/Autolev.g4,sha256=980mo25mLWrQFmhRIg-aqIalUuwktYYaBGTXZ5_XZwA,4195 +sympy/parsing/autolev/__init__.py,sha256=sp5hzv5siVW3xUmhkp0S0iaA0Cz-PVB0HO1zC04pxYs,3611 +sympy/parsing/autolev/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/autolev/__pycache__/_build_autolev_antlr.cpython-310.pyc,, +sympy/parsing/autolev/__pycache__/_listener_autolev_antlr.cpython-310.pyc,, +sympy/parsing/autolev/__pycache__/_parse_autolev_antlr.cpython-310.pyc,, +sympy/parsing/autolev/_antlr/__init__.py,sha256=MQ4ZacpTuP-NmruFXKdWLQatoeVJQ8SaBQ2DnYvtyE8,203 +sympy/parsing/autolev/_antlr/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/autolev/_antlr/__pycache__/autolevlexer.cpython-310.pyc,, +sympy/parsing/autolev/_antlr/__pycache__/autolevlistener.cpython-310.pyc,, +sympy/parsing/autolev/_antlr/__pycache__/autolevparser.cpython-310.pyc,, +sympy/parsing/autolev/_antlr/autolevlexer.py,sha256=K7HF_-5dUyAIv1_7GkhTmxqSCanEhCpzJG8fayAEB3Q,13609 +sympy/parsing/autolev/_antlr/autolevlistener.py,sha256=EDb3XkH9Y7CLzxGM-tY-nGqxMGfBHVkqKdVCPxABgRE,12821 +sympy/parsing/autolev/_antlr/autolevparser.py,sha256=BZYJ7IkurRmm44S50pYp_9JHCjT8fr1w5HeksAEPjtg,106291 +sympy/parsing/autolev/_build_autolev_antlr.py,sha256=XOR44PCPo234I_Z1QnneSArY8aPpp4xP4-dycMalQQw,2590 +sympy/parsing/autolev/_listener_autolev_antlr.py,sha256=P5XTo2UjkyDyx4d9kpmWIm6BoCXyOiED9s8Tr3w3Am4,104758 +sympy/parsing/autolev/_parse_autolev_antlr.py,sha256=b9hIaluJUd1V2XIAp1erak6U-c-CwKyDLH1UkYQuvKE,1736 +sympy/parsing/autolev/test-examples/README.txt,sha256=0C4m_nLROeV5J8nMfm3RYEfYgQJqmlHZaCpVD24boQY,528 +sympy/parsing/autolev/test-examples/__pycache__/ruletest1.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest10.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest11.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest12.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest2.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest3.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest4.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest5.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest6.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest7.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest8.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/__pycache__/ruletest9.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/chaos_pendulum.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/double_pendulum.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/mass_spring_damper.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/non_min_pendulum.cpython-310.pyc,, +sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.al,sha256=HpTcX2wXzLqmgpp8fcSqNweKjxljk43iYK0wQmBbCDI,690 +sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py,sha256=FSu4TP2BDTQjzYhMkcpRhXbb3kAD27XCyO_EoL55Ack,2274 
+sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.al,sha256=wjeeRdCS3Es6ldX9Ug5Du1uaijUTyoXpfTqmhL0uYfk,427 +sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py,sha256=uU9azTUGrY15BSDtw5T_V-7gmjyhHbXslzkmwBvFjGk,1583 +sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.al,sha256=Gf7OhgRlwqUEXq7rkfbf89yWA23u4uIUJ-buXTyOuXM,505 +sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py,sha256=9ReCAqcUH5HYBgHmop9h5Zx54mfScWZN5L5F6rCHk4w,1366 +sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.al,sha256=p5v40h1nVFrWNqnB0K7GiNQT0b-MqwayYjZxXOY4M8M,362 +sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py,sha256=DdxcWrm3HMQuyyY3Pk6sKHb4RXhQEM_EKY3HYZCP8ec,1503 +sympy/parsing/autolev/test-examples/ruletest1.al,sha256=mDJ02Q1Qm-ShVmGoyjzSfgDJHUOuDrsUg3YMnkpKdUw,176 +sympy/parsing/autolev/test-examples/ruletest1.py,sha256=eIKEFzEwkCFhPF0GTmf6SLuxXT384GqdCJnhiL2U0BQ,555 +sympy/parsing/autolev/test-examples/ruletest10.al,sha256=jKpV8BgX91iQsQDLFOJyaS396AyE5YQlUMxih5o9RK0,781 +sympy/parsing/autolev/test-examples/ruletest10.py,sha256=I1tsQcSAW6wqIguF-7lwlj9D4YZ8kCZqPqTKPUHR9oI,2726 +sympy/parsing/autolev/test-examples/ruletest11.al,sha256=j_q7giq2KIuXVRLWwNlwIlpbhNO6SqBMnLGLcxIkzwk,188 +sympy/parsing/autolev/test-examples/ruletest11.py,sha256=dYTRtXvMDXHiKzXHD2Sh0fcEukob3wr_GbSeqaZrrO8,475 +sympy/parsing/autolev/test-examples/ruletest12.al,sha256=drr2NLrK1ewn4FjMppXycpAUNbZEQ0IAMsdVx8nxk6I,185 +sympy/parsing/autolev/test-examples/ruletest12.py,sha256=ZG36s3PnkT0aKBM9Nx6H0sdJrtoLwaebU9386YSUql8,472 +sympy/parsing/autolev/test-examples/ruletest2.al,sha256=d-QjPpW0lzugaGBg8F6pDl_5sZHOR_EDJ8EvWLcz4FY,237 +sympy/parsing/autolev/test-examples/ruletest2.py,sha256=jrJfb0Jk2FP4GS5pDa0UB5ph0ijEVd1X8meKeZrTVng,820 +sympy/parsing/autolev/test-examples/ruletest3.al,sha256=1TAaOe8GI8-yBWJddfIxwnvScHNmOjSzSaQn0RS_v5k,308 +sympy/parsing/autolev/test-examples/ruletest3.py,sha256=O3K3IQo-HCjAIOSkfz3bDlst7dVUiRwhOZ0q_3jb5LU,1574 +sympy/parsing/autolev/test-examples/ruletest4.al,sha256=qPGlPbdDRrzTDUBeWydAIa7mbjs2o3uX938QAsWJ7Qk,302 +sympy/parsing/autolev/test-examples/ruletest4.py,sha256=WHod5yzKF4TNbEf4Yfxmx9WnimA7NOXqtTjZXR8FsP0,682 +sympy/parsing/autolev/test-examples/ruletest5.al,sha256=VuiKjiFmLK3uEdho0m3pk-n0qm4SNLoLPMRJqjMJ4GY,516 +sympy/parsing/autolev/test-examples/ruletest5.py,sha256=WvUtno1D3BrmFNPYYIBKR_gOA-PaHoxLlSTNDX67dcQ,1991 +sympy/parsing/autolev/test-examples/ruletest6.al,sha256=-HwgTmh_6X3wHjo3PQi7378t8YdizRJClc5Eb5DmjhE,703 +sympy/parsing/autolev/test-examples/ruletest6.py,sha256=vEO0jMOD-KIevAcVexmpvac0MGjN7O_dNipOBJJNzF0,1473 +sympy/parsing/autolev/test-examples/ruletest7.al,sha256=wR9S9rTzO9fyKL6Ofgwzw8XCFCV_p2hBpYotC8TvADI,773 +sympy/parsing/autolev/test-examples/ruletest7.py,sha256=_XvMrMe5r9RLopTrIqMGLhaYvHL1qjteWz9CKcotCL8,1696 +sympy/parsing/autolev/test-examples/ruletest8.al,sha256=P7Nu3Pq2R1mKcuFRc9dRO5jJ1_e5fwWdtqYG8NHVVds,682 +sympy/parsing/autolev/test-examples/ruletest8.py,sha256=8tgbwJ-ir0wiOCsgIFCAu4uD8SieYRrLoLzEfae5YQY,2690 +sympy/parsing/autolev/test-examples/ruletest9.al,sha256=txtZ5RH2p1FvAe6etwetSCH8rLktnpk5z0W72sCOdAA,755 +sympy/parsing/autolev/test-examples/ruletest9.py,sha256=GtqV-Wq2GGJzfblMscAz-KXCzs0P_4XqvA3FIdlPe04,1965 +sympy/parsing/c/__init__.py,sha256=J9CvkNRY-qy6CA06GZYuwTuxdnqas6oUP2g0qLztGro,65 +sympy/parsing/c/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/c/__pycache__/c_parser.cpython-310.pyc,, 
+sympy/parsing/c/c_parser.py,sha256=o7UohvD8V6feJr74sIbx2NNAyZOLFNJDHtiUPg_rUeg,39331 +sympy/parsing/fortran/__init__.py,sha256=KraiVw2qxIgYeMRTFjs1vkMi-hqqDkxUBv8Rc2gwkCI,73 +sympy/parsing/fortran/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/fortran/__pycache__/fortran_parser.cpython-310.pyc,, +sympy/parsing/fortran/fortran_parser.py,sha256=RpNQR3eNx5vgfzdt0nEZDCB56kF__SnYMaqWN3zla00,11483 +sympy/parsing/latex/LICENSE.txt,sha256=AHvDClj6QKmW53IEcSDeTq8x9REOT5w7X5P8374urKE,1075 +sympy/parsing/latex/LaTeX.g4,sha256=fG0ZUQPwYQOIbcyaPDAkGvcfGs3ZwwMB8ZnKW5yHUDY,5821 +sympy/parsing/latex/__init__.py,sha256=10TctFMpk3AolsniTJR5rQr19QXNqVTx-rl8ZFkHC4s,991 +sympy/parsing/latex/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/latex/__pycache__/_build_latex_antlr.cpython-310.pyc,, +sympy/parsing/latex/__pycache__/_parse_latex_antlr.cpython-310.pyc,, +sympy/parsing/latex/__pycache__/errors.cpython-310.pyc,, +sympy/parsing/latex/_antlr/__init__.py,sha256=TAb79senorEsoYLCLwUa8wg8AUCHzmmZ7tLdi0XGNaE,384 +sympy/parsing/latex/_antlr/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/latex/_antlr/__pycache__/latexlexer.cpython-310.pyc,, +sympy/parsing/latex/_antlr/__pycache__/latexparser.cpython-310.pyc,, +sympy/parsing/latex/_antlr/latexlexer.py,sha256=Y1hmY1VGL5FTSSlToTRQydPnyaLLNy1mDSWx76HaYwM,30502 +sympy/parsing/latex/_antlr/latexparser.py,sha256=ZvonpvTS3vLSOVpas88M3CfNnUhPUDsCCPPk4wBYUGE,123655 +sympy/parsing/latex/_build_latex_antlr.py,sha256=id_4pbcI4nAa0tHumN0lZX0Ubb-BaJ3czGwiQR_jZPE,2777 +sympy/parsing/latex/_parse_latex_antlr.py,sha256=3iUHktfORn60D5SBpRNjSSaxuKlmzEBI5-DilfkkRQ0,20525 +sympy/parsing/latex/errors.py,sha256=adSpvQyWjTLsbN_2KHJ4HuXpY7_U9noeWiG0lskYLgE,45 +sympy/parsing/mathematica.py,sha256=AX5q_9bDARtC0w3bFNmhNKGqe3X7NlprZEvMCbV_vMs,39282 +sympy/parsing/maxima.py,sha256=DhTnXRSAceijyA1OAm86c6TyW9-aeUVoZEELGu0oZtY,1835 +sympy/parsing/sym_expr.py,sha256=-hxarp961eyLtuwUhbg3D3qzy06HrEPZEYpGVcJzAv0,8895 +sympy/parsing/sympy_parser.py,sha256=QA9TRHZwqQ8kqfOPA4EeHfKz1dCqpBppRtVTE61IpO0,43814 +sympy/parsing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/parsing/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_ast_parser.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_autolev.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_c_parser.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_fortran_parser.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_implicit_multiplication_application.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_latex.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_latex_deps.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_mathematica.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_maxima.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_sym_expr.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_sympy_parser.cpython-310.pyc,, +sympy/parsing/tests/test_ast_parser.py,sha256=lcT8w7mn6UEZ8T-xfA4TqG4Mt7JxY00oHhOW7JtHQfY,803 +sympy/parsing/tests/test_autolev.py,sha256=tQuUFa8YqVdsHPOcUhAwlMKB8Uk08HejDhDCda8lXs0,6647 +sympy/parsing/tests/test_c_parser.py,sha256=yIYdfnaHX9Z93-Cmf6x9C7eysQ-y3_lU-6CGRXN4WL8,154665 +sympy/parsing/tests/test_fortran_parser.py,sha256=SGbawrJ4a780TJAFVMONc7Y3Y8VYgVqsIHxVGaicbxE,11828 +sympy/parsing/tests/test_implicit_multiplication_application.py,sha256=nPzLKcAJJaoZgdLoq1_CXhiWKFBH--p4t6dq4I3sV9A,7448 +sympy/parsing/tests/test_latex.py,sha256=khNyIVANKnQFIE6hR3UdSqlzYdZWDtO0vs6TxhpWDUI,11503 
+sympy/parsing/tests/test_latex_deps.py,sha256=oe5vm2eIKn05ZiCcXUaO8X6HCcRmN1qCuTsz6tB7Qrk,426 +sympy/parsing/tests/test_mathematica.py,sha256=ma9YM-Cti4hMhjZym5RMGaesxaWki6p29QROJ4oSs4E,13166 +sympy/parsing/tests/test_maxima.py,sha256=iIwnFm0lYD0-JcraUIymogqEMN3ji0c-0JeNFFGTEDs,1987 +sympy/parsing/tests/test_sym_expr.py,sha256=-wNR7GwvJHVmPSZxSuAuoX1_FJk83O0tcDi09qYY6Jk,5668 +sympy/parsing/tests/test_sympy_parser.py,sha256=5__CszZfy8DAl5JzfsLGsDECRjdT20a3p9cwYBXvAh8,12253 +sympy/physics/__init__.py,sha256=F_yvUMCuBq3HR-3Ai6W4oktBsXRg8KdutFLwT9FFJlY,220 +sympy/physics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/__pycache__/hydrogen.cpython-310.pyc,, +sympy/physics/__pycache__/matrices.cpython-310.pyc,, +sympy/physics/__pycache__/paulialgebra.cpython-310.pyc,, +sympy/physics/__pycache__/pring.cpython-310.pyc,, +sympy/physics/__pycache__/qho_1d.cpython-310.pyc,, +sympy/physics/__pycache__/secondquant.cpython-310.pyc,, +sympy/physics/__pycache__/sho.cpython-310.pyc,, +sympy/physics/__pycache__/wigner.cpython-310.pyc,, +sympy/physics/continuum_mechanics/__init__.py,sha256=moVrcsEw_a8db69dtuwE-aquZ1TAJc7JxHukrYnJuyM,89 +sympy/physics/continuum_mechanics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc,, +sympy/physics/continuum_mechanics/__pycache__/truss.cpython-310.pyc,, +sympy/physics/continuum_mechanics/beam.py,sha256=i3BcVzCsC9AUPjyAcPd5Lfwcpb_9bz9V-cO6N2WlkLU,148566 +sympy/physics/continuum_mechanics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/continuum_mechanics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/continuum_mechanics/tests/__pycache__/test_beam.cpython-310.pyc,, +sympy/physics/continuum_mechanics/tests/__pycache__/test_truss.cpython-310.pyc,, +sympy/physics/continuum_mechanics/tests/test_beam.py,sha256=IubYZzOkQ9dBcyR_rLA9FxUkFZ_x1BX16MKUvyJaOkE,26879 +sympy/physics/continuum_mechanics/tests/test_truss.py,sha256=dsjtXQoBXcFDacKc55DbZST1L69XGKN0TMtCBnHN5hY,3368 +sympy/physics/continuum_mechanics/truss.py,sha256=C9JPSDutXBS4QFmdqcsClFCtdN9tdGauPD8TYQ4_NF0,28496 +sympy/physics/control/__init__.py,sha256=Z5cPVgXd8BAdxX9iqyLLVyk2n2ry_jiMBHo6crMeLFA,1027 +sympy/physics/control/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/control/__pycache__/control_plots.cpython-310.pyc,, +sympy/physics/control/__pycache__/lti.cpython-310.pyc,, +sympy/physics/control/control_plots.py,sha256=Q25egDhUs-xrlh5oy4ZBlnOqF5pJtQ1SRo28r5nnudY,32222 +sympy/physics/control/lti.py,sha256=EquvSYF2ifqnFfYsnoJuAsRrZHQIm7f6LwmZGbmbW-M,114652 +sympy/physics/control/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/control/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/control/tests/__pycache__/test_control_plots.cpython-310.pyc,, +sympy/physics/control/tests/__pycache__/test_lti.cpython-310.pyc,, +sympy/physics/control/tests/test_control_plots.py,sha256=EDTfKI08wacHtYFKf7HeBi43msqqAvMOhTWf-8RJu3k,15728 +sympy/physics/control/tests/test_lti.py,sha256=QPuNpHlSquTX14-r4YbhNfxh32x_D17jAxtO2aQn5GA,59908 +sympy/physics/hep/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/hep/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc,, +sympy/physics/hep/gamma_matrices.py,sha256=WlSHLUtMU7NrgLyKEvTntMSYxMZq1r_6o2kqUEAdPaA,24253 +sympy/physics/hep/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+sympy/physics/hep/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/hep/tests/__pycache__/test_gamma_matrices.cpython-310.pyc,, +sympy/physics/hep/tests/test_gamma_matrices.py,sha256=iKqICj0bP7EK0sSuYFsPdPkDTbHGa6J_LMPZAzv1j4o,14722 +sympy/physics/hydrogen.py,sha256=R2wnNi1xB-WTQ8Z9aPUhX9Z8mQ8TdhCM1JAZIkyXgjw,7594 +sympy/physics/matrices.py,sha256=jHfbWkzL2myFt-39kodQo5wPubBxNZKXlljuSxZL4bE,3836 +sympy/physics/mechanics/__init__.py,sha256=57XHPOZF3y2-dLcrfwECEgjFthUYeQncmft3GZYKyOY,2033 +sympy/physics/mechanics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/body.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/functions.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/joint.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/jointsmethod.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/kane.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/lagrange.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/linearize.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/method.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/models.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/particle.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/rigidbody.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/system.cpython-310.pyc,, +sympy/physics/mechanics/body.py,sha256=eqQbmsPOZnad0aH326N_FfZZWtzs4IIvbugwfkLlHtQ,19088 +sympy/physics/mechanics/functions.py,sha256=GbhUZWZD0HqLGh03ojXfnATxM-oxM708AmFtCgOjJFE,25557 +sympy/physics/mechanics/joint.py,sha256=hTBI8wd7ylnRgR1hrW-Xg9pTiFHBNgA6j5MWfTJMzdU,82739 +sympy/physics/mechanics/jointsmethod.py,sha256=FmccW8429JLfg9-Gxc4oeekrPi2ig77gYZJ2x7qVzMA,8530 +sympy/physics/mechanics/kane.py,sha256=L-imRN4zBCtFXajjyQ4-2peMULqysCbVEUq69JpQbgA,30567 +sympy/physics/mechanics/lagrange.py,sha256=_BM2q2euBxiVj-5OVMOkuzu9D012MP5AC6LnOENwbX0,18338 +sympy/physics/mechanics/linearize.py,sha256=sEX52OQP-pJ_pIlw8oVv01oQPeHiPf0LCm1GMuIn1Yo,15615 +sympy/physics/mechanics/method.py,sha256=2vFRhA79ra4HR6AzVBHMr3oNncrcqgLLMRqdyif0DrI,660 +sympy/physics/mechanics/models.py,sha256=9q1g3I2xYpuTMi-v9geswEqxJWTP3RjcOquRfzMhHzM,6463 +sympy/physics/mechanics/particle.py,sha256=F-pPvcmfxdacZxSIwnaXJ-W9KslIEnCw7ljCLlxVk4Y,7577 +sympy/physics/mechanics/rigidbody.py,sha256=YTWj-awmWw-OZQQ6wn_HxrTnmSu0Hvhd1TJxRVU62LI,11192 +sympy/physics/mechanics/system.py,sha256=Un6ep47tygf1Vdp-8G2WS6uT-FCqOBRwrDUdonFd_vA,18671 +sympy/physics/mechanics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/mechanics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_body.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_functions.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_joint.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_jointsmethod.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane2.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane3.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane4.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_lagrange.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_lagrange2.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_linearize.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_method.cpython-310.pyc,, 
+sympy/physics/mechanics/tests/__pycache__/test_models.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_particle.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_rigidbody.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_system.cpython-310.pyc,, +sympy/physics/mechanics/tests/test_body.py,sha256=fV3dp94uFbE7ZHb7DkD0fJ1UgbSdc1NVVy0yRuYZfuk,11213 +sympy/physics/mechanics/tests/test_functions.py,sha256=W1k7uhYHs1Ayvtr4q8P_S8cUiwOuaz-UdE1svV4WpCQ,11033 +sympy/physics/mechanics/tests/test_joint.py,sha256=fordUBSC7clvKTuKCtb-KhrOGUonMF1w-91G-pawzKk,53035 +sympy/physics/mechanics/tests/test_jointsmethod.py,sha256=0soorl_p-tVwRx0jWreexWLXBk3v13ZnW9vJ0U6t6Pg,8935 +sympy/physics/mechanics/tests/test_kane.py,sha256=rFhtyVrr4Tifdwwgq-vedU8BneLPa_zVcUNWpHAiEvA,20599 +sympy/physics/mechanics/tests/test_kane2.py,sha256=3MweQ_qfbyc8WqcSvvj7iKQLRdMlki9S6uNyd8ZIDN0,19111 +sympy/physics/mechanics/tests/test_kane3.py,sha256=rc4BwlH3VGV21UH_s6I9y1CwHBwvdy3xvkEDS3lAJHQ,14432 +sympy/physics/mechanics/tests/test_kane4.py,sha256=a7CFmnz-MFbQbfop_tAhRUAHk7BJZEfa9PlcX2K8Y0Y,4722 +sympy/physics/mechanics/tests/test_lagrange.py,sha256=iuHomulBF8MafLeorKGaLHUEF8CvFhXcxEtN0hk1akM,10119 +sympy/physics/mechanics/tests/test_lagrange2.py,sha256=HCnDemnFD1r3DIT4oWnypcsZKvF1BA96_MMYHE7Q_xo,1413 +sympy/physics/mechanics/tests/test_linearize.py,sha256=G4XdGFp6lIUwNJ6qm77X24ZPKgGcyxYBuCv61WeROXM,11826 +sympy/physics/mechanics/tests/test_method.py,sha256=L7CnsvbQC-U7ijbSZdu7DEr03p88OLj4IPvFJ_3kCDo,154 +sympy/physics/mechanics/tests/test_models.py,sha256=X7lrxTIWuTP7GgpYyGVmOG48zG4UDWV99FACXFO5VMA,5091 +sympy/physics/mechanics/tests/test_particle.py,sha256=j66nmXM7R_TSxr2Z1xywQKD-al1z62I15ozPaywN1n0,2153 +sympy/physics/mechanics/tests/test_rigidbody.py,sha256=QvAAtofAqA4oQaYvxN1gK7QJf6TGrI3TqY5fHjbP200,5247 +sympy/physics/mechanics/tests/test_system.py,sha256=vRxvOH56wuWRTygmTcJJZAlB6Bw2Vlhcr9q6A526_WA,8713 +sympy/physics/optics/__init__.py,sha256=0UmqIt2-u8WwNkAqsnOVt9VlkB9K0CRIJYiQaltJ73w,1647 +sympy/physics/optics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/optics/__pycache__/gaussopt.cpython-310.pyc,, +sympy/physics/optics/__pycache__/medium.cpython-310.pyc,, +sympy/physics/optics/__pycache__/polarization.cpython-310.pyc,, +sympy/physics/optics/__pycache__/utils.cpython-310.pyc,, +sympy/physics/optics/__pycache__/waves.cpython-310.pyc,, +sympy/physics/optics/gaussopt.py,sha256=xMoYUyPyh2ycyNj5gomy_0PkNKKHa9XRlE39mZUQaqI,20892 +sympy/physics/optics/medium.py,sha256=cys0tWGi1VCPWMTZuKadcN_bToz_bqKsDHSEVzuV3CE,7124 +sympy/physics/optics/polarization.py,sha256=mIrZiOVXetGtKkLxl8Llaf2Z9coWenf6JKrClh4W8yU,21434 +sympy/physics/optics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/optics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_gaussopt.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_medium.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_polarization.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_utils.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_waves.cpython-310.pyc,, +sympy/physics/optics/tests/test_gaussopt.py,sha256=QMXJw_6mFCC3918b-pc_4b_zgO8Hsk7_SBvMupbEi5I,4222 +sympy/physics/optics/tests/test_medium.py,sha256=RxG7N3lzmCO_8hIoKyPnDKffmk8QFzA9yamu1_mr_dE,2194 +sympy/physics/optics/tests/test_polarization.py,sha256=81MzyA29HZckg_Ss-88-5o0g9augDqCr_LwcJIiXuA0,2605 
+sympy/physics/optics/tests/test_utils.py,sha256=SjicjAptcZGwuX-ib_Lq7PlGONotvo2XJ4p3JA9iNVI,8553 +sympy/physics/optics/tests/test_waves.py,sha256=PeFfrl7MBkWBHdc796sDDYDuhGepat3DQk7PmyTXVnw,3397 +sympy/physics/optics/utils.py,sha256=qoSlzujMTHDxIZvBQPJ_cF2PxB-awyXVqCndriUd-PQ,22154 +sympy/physics/optics/waves.py,sha256=Iw-9gGksvWhPmQ_VepmI90ekKyzHdPlq6U41wdM4ikI,10042 +sympy/physics/paulialgebra.py,sha256=1r_qDBbVyl836qIXlVDdoF89Z9wedGvWIkHAbwQaK-4,6002 +sympy/physics/pring.py,sha256=SCMGGIcEhVoD7dwhY7_NWL1iKwo7OfgKdmm2Ok_9Xl0,2240 +sympy/physics/qho_1d.py,sha256=ZXemUsa_b0rLtPVTUkgAkZQ1Ecu2eIZxaiNSSXW0PDk,2005 +sympy/physics/quantum/__init__.py,sha256=RA2xbM7GhFq3dVNTna3odlTJYHqNerxjNeZ1kwigHiw,1705 +sympy/physics/quantum/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/anticommutator.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/boson.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/cartesian.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/cg.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/circuitplot.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/circuitutils.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/commutator.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/constants.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/dagger.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/density.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/fermion.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/gate.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/grover.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/hilbert.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/identitysearch.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/innerproduct.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/matrixcache.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/matrixutils.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/operator.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/operatorordering.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/operatorset.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/pauli.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/piab.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qapply.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qasm.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qexpr.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qft.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qubit.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/represent.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/sho1d.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/shor.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/spin.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/state.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/tensorproduct.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/trace.cpython-310.pyc,, +sympy/physics/quantum/anticommutator.py,sha256=TH0mPF3Dk9mL5fa2heuampDpwWFxxh3HCcg4g2uNQ_E,4446 +sympy/physics/quantum/boson.py,sha256=cEH8dcPXunognApc69Y6TSJRMZ63P20No6tB2xGHynQ,6313 +sympy/physics/quantum/cartesian.py,sha256=9R9VDYLV1Xe-GkA9TQbj8PVlBLaD0fF6KXfHJ1ze5as,9092 +sympy/physics/quantum/cg.py,sha256=hPkgraNAWHIC-b0Pr0IwiY_gfR9pthQC6IuNI89J4dI,23331 +sympy/physics/quantum/circuitplot.py,sha256=SacQMhPyDhizKmGRNEs1vtXph8lR6bMn5bVJI4rJiXg,11799 +sympy/physics/quantum/circuitutils.py,sha256=mrQNUDbwM3LV1NZ1EqVpXyOY2mOXCBVZW7cQTiCxUaM,13882 
+sympy/physics/quantum/commutator.py,sha256=7IiNnFYxxi9EfElCFtMLEQccb6nB-jIeq4x3IlIqzKs,7521 +sympy/physics/quantum/constants.py,sha256=20VRATCkSprSnGFR5ejvMEYlWwEcv1B-dE3RPqPTQ9k,1420 +sympy/physics/quantum/dagger.py,sha256=KOeHXb52hvR1IbeNwlNU30KPiD9xv7S1a2dowkQqBLM,2428 +sympy/physics/quantum/density.py,sha256=vCH8c4Fu5lcrT0PsuBqEK7eWnyHtCRwVx4wSh3f07ME,9743 +sympy/physics/quantum/fermion.py,sha256=9umlSpm6pKoplH7hRRHbuwvkvdM98A9GGNZ6yeNJf_o,4506 +sympy/physics/quantum/gate.py,sha256=T_VkbtJEN0rbOB8wrlZFkI7NU1XJ2MGyEx9PX3GCV_4,42487 +sympy/physics/quantum/grover.py,sha256=Cu2EPTOWpfyxYMVOdGBZez8SBZ2i2QEUmHnTiPPSi-M,10454 +sympy/physics/quantum/hilbert.py,sha256=qrja92vF7BUeSyHOLKVX8-XKcPGT7QaQMWrqWXjRNus,19632 +sympy/physics/quantum/identitysearch.py,sha256=Zh_ji5J0YeAy2AezsQcHV9W2icWoaa3ZwTbfjCCQmJo,27607 +sympy/physics/quantum/innerproduct.py,sha256=K4tmyWYMlgzkTTXjs82PzEC8VU4jm2J6Qic4YmAM7SQ,4279 +sympy/physics/quantum/matrixcache.py,sha256=S6fPkkYmfX8ELBOc9EST-8XnQ1gtpSOBfd2KwLGKdYo,3587 +sympy/physics/quantum/matrixutils.py,sha256=D5ipMBRCh2NsxIy4F6ZLQAF4Y84-2rKKC-czCVZ22Ds,8213 +sympy/physics/quantum/operator.py,sha256=zxPohzuo4H_veqo_Lkws1mN5mKufKlK5JZrgpxQXABM,19311 +sympy/physics/quantum/operatorordering.py,sha256=smjToA0lj6he22d9R61EL2FSNXFz9oTIF8x5UOd4RNs,11597 +sympy/physics/quantum/operatorset.py,sha256=W8rYUrh167nkZcoXCTFscZ1ZvBT6WXkMfmKzRks3edE,9598 +sympy/physics/quantum/pauli.py,sha256=lzxWFHXqxKWRiYK99QCo9zuVG9eVXiB8vFya7TvrVxQ,17250 +sympy/physics/quantum/piab.py,sha256=Zjb2cRGniVDV6e35gjP4uEpI4w0C7YGQIEXReaq_z-E,1912 +sympy/physics/quantum/qapply.py,sha256=E6hH0w7pMHaXOixT3FWkcBJm56Yoi8B93wedgcH3XQY,7147 +sympy/physics/quantum/qasm.py,sha256=UWpcUIBgkK55SmEBZlpmz-1KGHZvW7dNeSVG8tHr44A,6288 +sympy/physics/quantum/qexpr.py,sha256=UD2gBfjYRnHcqKYk-Jhex8dOoxNProadx154vejvtB4,14005 +sympy/physics/quantum/qft.py,sha256=Iy6yd41lENuCeU5jLXY7O3E_Sc3SAHCN3X5bE0sQiiU,6352 +sympy/physics/quantum/qubit.py,sha256=OyVzGFycgwyn8ZvsCNYsuDmG801JurfKwlKxVDHIBCo,26007 +sympy/physics/quantum/represent.py,sha256=b_mEm3q-gZbIV5x5Vl6pzfyJytqlp_a98xpfse2AfgI,18707 +sympy/physics/quantum/sho1d.py,sha256=ZroR_FjxmjOmDcd0Fm04vWKTGCpvLaEu4NiuplKm708,20867 +sympy/physics/quantum/shor.py,sha256=nHT2m4msS5gyQLYPIo2X6XcF7y0pTRZYJUYxZG0YCUk,5504 +sympy/physics/quantum/spin.py,sha256=3h9uGC5vJcnu3qRzXnZr-nUNyHkC4AvIOB-rBmbliJ4,72948 +sympy/physics/quantum/state.py,sha256=ISVtxmQjQL28neAcvyLDD6QJtLAFPwotCBeArPmDuFc,30975 +sympy/physics/quantum/tensorproduct.py,sha256=uBpy2037T1bCxZsiFoIAzHQru2Yi2Om8PFDtdCq5Nas,14960 +sympy/physics/quantum/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/quantum/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_anticommutator.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_boson.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_cartesian.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_cg.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_circuitplot.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_circuitutils.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_commutator.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_constants.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_dagger.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_density.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_fermion.cpython-310.pyc,, 
+sympy/physics/quantum/tests/__pycache__/test_gate.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_grover.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_hilbert.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_identitysearch.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_innerproduct.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_matrixutils.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_operator.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_operatorordering.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_operatorset.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_pauli.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_piab.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_printing.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qapply.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qasm.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qexpr.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qft.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qubit.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_represent.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_sho1d.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_shor.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_spin.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_state.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_tensorproduct.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_trace.cpython-310.pyc,, +sympy/physics/quantum/tests/test_anticommutator.py,sha256=ckWHKwQFiAMWcDaYSa_26vi_GIsvs32_0O62I5lGsr8,1304 +sympy/physics/quantum/tests/test_boson.py,sha256=BZjdrZ-F1QhyhDqfK4Zc1VEFBJi1PeiPjMpfBcHekfo,1676 +sympy/physics/quantum/tests/test_cartesian.py,sha256=b8eBLwmL8ize-a30TMDkoWuDym02PvBjr7ayfLwaR_I,4112 +sympy/physics/quantum/tests/test_cg.py,sha256=pw14QQ6XBTkK35021E_nDqcvXdOi4bLiPlkddyE865s,8878 +sympy/physics/quantum/tests/test_circuitplot.py,sha256=c3v9wUzLHUH-eBVGj6_broVhHkioNwpaaApTDAJEflU,2096 +sympy/physics/quantum/tests/test_circuitutils.py,sha256=GrJAWRQVH_l8EIHrj1ve2jtxske72IriQ3lo94fqrVQ,13187 +sympy/physics/quantum/tests/test_commutator.py,sha256=keBstGDpNITFRr06uVFrka_Lje56g6oFoJQEpZXmnYw,2727 +sympy/physics/quantum/tests/test_constants.py,sha256=KBmYPIF49Sq34lbzbFCZRYWSyIdhnR3AK3q-VbU6grU,338 +sympy/physics/quantum/tests/test_dagger.py,sha256=PR19goU60RXL3aU3hU2CJ3VyrlGeP6x_531nI9mqvm8,2009 +sympy/physics/quantum/tests/test_density.py,sha256=EyxiEgyc0nDSweJwI0JUwta7gZ81TVHCl7YDEosTrvI,9718 +sympy/physics/quantum/tests/test_fermion.py,sha256=bFaOWjPHv5HNR10Jvk4i9muJ3MQIyznPWZMtDCtKrZM,1135 +sympy/physics/quantum/tests/test_gate.py,sha256=7oBX1HoWnrYtHjABRoqv_wQDB9B829E99fdcJzaqawM,12496 +sympy/physics/quantum/tests/test_grover.py,sha256=uze62AG6H4x2MYJJA-EY3NtkqwvrDIQ2kONuvIRQiZ4,3640 +sympy/physics/quantum/tests/test_hilbert.py,sha256=IGP6rc2-b3we9dRDbpRniFAhQwp_TYtMfFzxusAprx0,2643 +sympy/physics/quantum/tests/test_identitysearch.py,sha256=3YGrXCsFLhLtN5MRyT5ZF8ELrSdkvDKTv6xKM4i2ims,17745 +sympy/physics/quantum/tests/test_innerproduct.py,sha256=37tT8p6MhHjAYeoay1Zyv7gCs-DeZQi4VdwUH2IffDE,1483 +sympy/physics/quantum/tests/test_matrixutils.py,sha256=3wmKKRhfRuwdQWitWE2mJEHr-TUKn6ixNb_wPWs8wRw,4116 
+sympy/physics/quantum/tests/test_operator.py,sha256=BZNYANH2w2xfOkqFA3oIS_Kl1KnwnDUroV7d9lQ3IdY,8164 +sympy/physics/quantum/tests/test_operatorordering.py,sha256=CNMvvTNGNSIXPGLaYjxAOFKk-2Tn4yp3L9w-hc1IMnE,1402 +sympy/physics/quantum/tests/test_operatorset.py,sha256=DNfBeYBa_58kSG7PM5Ilo6xnzek8lSiAGX01uMFRYqI,2628 +sympy/physics/quantum/tests/test_pauli.py,sha256=Bhsx_gj5cpYv4BhVJRQohxlKk_rcp4jHtSRlTP-m_xs,4940 +sympy/physics/quantum/tests/test_piab.py,sha256=8ndnzyIsjF4AOu_9k6Yqap_1XUDTbiGnv7onJdrZBWA,1086 +sympy/physics/quantum/tests/test_printing.py,sha256=wR45NMA2w242-qnAlMjyOPj2yvwDbCKuBDh_V2sekr8,30294 +sympy/physics/quantum/tests/test_qapply.py,sha256=uHw3Crt5Lv0t6TV9jxmNwPVbiWGzFMaLZ8TJZfB1-Mg,6022 +sympy/physics/quantum/tests/test_qasm.py,sha256=ZvMjiheWBceSmIM9LHOL5fiFUl6HsUo8puqdzywrhkc,2976 +sympy/physics/quantum/tests/test_qexpr.py,sha256=emcGEqQeCv-kVJxyfX66TZxahJ8pYznFLE1fyyzeZGc,1517 +sympy/physics/quantum/tests/test_qft.py,sha256=CQWIKZFSpkUe5X7AF27EqVwZ4l0Zqycl3bdYgVZj3Hs,1861 +sympy/physics/quantum/tests/test_qubit.py,sha256=LQNaOuvXc-glRifQBlsXattAQB-yKHvmNMw68_JoM_c,8957 +sympy/physics/quantum/tests/test_represent.py,sha256=rEc_cirIJvoU1xANuOTkMjJHdr6DluP4J9sWD2D8Xpc,5166 +sympy/physics/quantum/tests/test_sho1d.py,sha256=nc75ZE5XXtrc88OcfB5mAGh01Wpf3d4Rbsu8vLJPTC8,4684 +sympy/physics/quantum/tests/test_shor.py,sha256=3a3GCg6V5_mlJ2bltoXinGMGvlSxpq7GluapD_3SZaQ,666 +sympy/physics/quantum/tests/test_spin.py,sha256=LOIPNGWalfPLL7DNAaiLCp4J_G1mZpUYmTCNx3kjqgw,344807 +sympy/physics/quantum/tests/test_state.py,sha256=UjfOdwRzNXHK0AMhEaI431eMNjVUK7glqiGxOXJEC50,6741 +sympy/physics/quantum/tests/test_tensorproduct.py,sha256=UncgjQFeJX3BOdHy8UYbb_Lwit67CfNuwLaFYRmyKUI,4703 +sympy/physics/quantum/tests/test_trace.py,sha256=dbpTXcJArWRR_Hh5JTuy2GJIfgjVo6zS20o5mdVEGH4,3057 +sympy/physics/quantum/trace.py,sha256=2ZqN9IEsz3LKHTLV8ZDwTK0sM5PfwL0p2sYet0N7Gis,6397 +sympy/physics/secondquant.py,sha256=FvAm6mVUVVRxaYPzqn4qwhkZCvN8LA8xUFKjnkMpPdw,90400 +sympy/physics/sho.py,sha256=K8P9FAdZr6UfQKYZO9TlhDUqUd3YsMekXCsKy2HhaY0,2480 +sympy/physics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_clebsch_gordan.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_hydrogen.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_paulialgebra.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_physics_matrices.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_pring.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_qho_1d.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_secondquant.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_sho.cpython-310.pyc,, +sympy/physics/tests/test_clebsch_gordan.py,sha256=HdmpjVHZ1JandoZrGwFb7YshkmEkcvt3jLLVxZ13UvA,8563 +sympy/physics/tests/test_hydrogen.py,sha256=kohRIR6JojE_GWYnlzLsMMgdhoKd8whazs0mq7cCTQc,4987 +sympy/physics/tests/test_paulialgebra.py,sha256=tyshEMsLNPR4iYzoAbPGZRZ-e_8t7GDP_xyjRyhepeQ,1477 +sympy/physics/tests/test_physics_matrices.py,sha256=Dha8iQRhzxLcl7TKSA6QP0pnEcBoqtj_Ob6tx01SMwI,2948 +sympy/physics/tests/test_pring.py,sha256=XScQQO9RhRrlqSII_ZyyOUpE-zs-7wphSFCZq2OuFnE,1261 +sympy/physics/tests/test_qho_1d.py,sha256=LD9WU-Y5lW7bVM7MyCkSGW9MU2FZhVjMB5Zk848_q1M,1775 +sympy/physics/tests/test_secondquant.py,sha256=VgG8NzcFmIkhFbKZpbjjzV4W5JOaJHGj9Ut8ugWM2UM,48450 +sympy/physics/tests/test_sho.py,sha256=aIs1f3eo6hb4ErRU8xrr_h_yhTmRx-fQgv9n27SfsLM,693 
+sympy/physics/units/__init__.py,sha256=DVvWy9qNRm742NFGcBpybFY20ZK3BU7DWNbLMTXYiFo,12386 +sympy/physics/units/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/__pycache__/dimensions.cpython-310.pyc,, +sympy/physics/units/__pycache__/prefixes.cpython-310.pyc,, +sympy/physics/units/__pycache__/quantities.cpython-310.pyc,, +sympy/physics/units/__pycache__/unitsystem.cpython-310.pyc,, +sympy/physics/units/__pycache__/util.cpython-310.pyc,, +sympy/physics/units/definitions/__init__.py,sha256=F3RyZc1AjM2Ch5b27Tt-VYdZ1HAIWvhgtQQQTfMiN6w,7470 +sympy/physics/units/definitions/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/definitions/__pycache__/dimension_definitions.cpython-310.pyc,, +sympy/physics/units/definitions/__pycache__/unit_definitions.cpython-310.pyc,, +sympy/physics/units/definitions/dimension_definitions.py,sha256=5r_WDnyWFX0T8bTjDA6pnr5PqRKv5XGTm0LuJrZ6ffM,1745 +sympy/physics/units/definitions/unit_definitions.py,sha256=kldfMjhOFdJAbYgZiJPUFtyUVINovDf4XTTC0mkoiDU,14374 +sympy/physics/units/dimensions.py,sha256=B2jT7BEsyCSZmUxH6RYrP9gVGeXLn0nLhgMT9gFODW4,20911 +sympy/physics/units/prefixes.py,sha256=ENV04BUHeebXK2U8jf7ZQdYQ-dZUGm1K2m6BYwJYF2w,6224 +sympy/physics/units/quantities.py,sha256=r5E231CULmsSEM7Rh7zfcTPuR85_X0CwRCVU_nDsek0,4671 +sympy/physics/units/systems/__init__.py,sha256=jJuvdc15c83yl11IuvhyjijwOZ9m1JGgZOgKwKv2e2o,244 +sympy/physics/units/systems/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/cgs.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/length_weight_time.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/mks.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/mksa.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/natural.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/si.cpython-310.pyc,, +sympy/physics/units/systems/cgs.py,sha256=gXbX8uuZo7lcYIENA-CpAnyS9WVQy-vRisxlQm-198A,3702 +sympy/physics/units/systems/length_weight_time.py,sha256=DXIDSWdhjfxGLA0ldOziWhwQjzTAs7-VQTNCHzDvCgY,7004 +sympy/physics/units/systems/mks.py,sha256=Z3eX9yWK9BdvEosCROK2qRKtKFYOjtQ50Jk6vFT7AQY,1546 +sympy/physics/units/systems/mksa.py,sha256=U8cSI-maIuLJRvpKLBuZA8V19LDRYVc2I40Rao-wvjk,2002 +sympy/physics/units/systems/natural.py,sha256=43Odvmtxdpbz8UcW_xoRE9ArJVVdF7dgdAN2ByDAXx4,909 +sympy/physics/units/systems/si.py,sha256=YBPUuovW3-JBDZYuStXXRaC8cfzE3En3K5MjNy5pLJk,14478 +sympy/physics/units/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/units/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_dimensions.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_dimensionsystem.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_prefixes.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_quantities.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_unit_system_cgs_gauss.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_unitsystem.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/physics/units/tests/test_dimensions.py,sha256=lzkgGfEXMHxB8Izv7nRTN2uOEPh65LXPYaG8Kr5H05o,6122 +sympy/physics/units/tests/test_dimensionsystem.py,sha256=s2_2RAJwOaPOTvyIiAO9SYap374ytZqWbatWkLCnbSU,2717 +sympy/physics/units/tests/test_prefixes.py,sha256=IFeF1tq9SkyqJLOLy5h42oMW7PDJ1QKtvyu0EbN3rxY,2198 +sympy/physics/units/tests/test_quantities.py,sha256=_OmQ1qBPud8-lVesvVNhQLrwRh9qp7rXMSGzqTtqCr0,20055 
+sympy/physics/units/tests/test_unit_system_cgs_gauss.py,sha256=JepTWt8yGdtv5dQ2AKUKb9fxpuYqLWOp0oOmzov9vfY,3173 +sympy/physics/units/tests/test_unitsystem.py,sha256=1Xh78_8hbv-yP4ICWI_dUrOnk3cimlvP_VhO-EXOa7Q,3254 +sympy/physics/units/tests/test_util.py,sha256=f2pOxVLArai5EwRAriPh9rQdxIyhFpZ4v7WEB0CI-SI,8465 +sympy/physics/units/unitsystem.py,sha256=UXFcmQoI8Hl89v4ixEfh35g__o6AgQPzgvLJhCLIFtA,7618 +sympy/physics/units/util.py,sha256=dgMkwlaYWO2D1QwSpGKFfYluqzdN6TUp-aIgXo8-W1o,9602 +sympy/physics/vector/__init__.py,sha256=jZmrNB6ZfY7NOP8nx8GWcfI2Ixb2mv7lXuGHn63kyOw,985 +sympy/physics/vector/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc,, +sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc,, +sympy/physics/vector/__pycache__/frame.cpython-310.pyc,, +sympy/physics/vector/__pycache__/functions.cpython-310.pyc,, +sympy/physics/vector/__pycache__/point.cpython-310.pyc,, +sympy/physics/vector/__pycache__/printing.cpython-310.pyc,, +sympy/physics/vector/__pycache__/vector.cpython-310.pyc,, +sympy/physics/vector/dyadic.py,sha256=qDsDiWZ8nTOVKKjST3MasskWUvrv8o8CZeLTXfJjp6Y,19538 +sympy/physics/vector/fieldfunctions.py,sha256=1tzyV2iH6-UIPJ6W4UhgOZHTGxAbnWhmdTxbz12Z528,8593 +sympy/physics/vector/frame.py,sha256=5wHaV4FIAC0XjvX5ziFmBwB2P2wKPk1Sipb6ao6STn0,52933 +sympy/physics/vector/functions.py,sha256=Fp3Fx0donNUPj9rkZ03xFC8HhUys4UvogK69ah2Sd3o,24583 +sympy/physics/vector/point.py,sha256=9hUKwsM_5npy9FuDSHe9eiOLQLfmZZE49rVxwEhPT2U,20446 +sympy/physics/vector/printing.py,sha256=iQmyZQib-9Oa7_suxwHplJ9HW198LPGmptDldwqRl20,11792 +sympy/physics/vector/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/vector/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_dyadic.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_fieldfunctions.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_frame.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_functions.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_output.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_point.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_printing.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_vector.cpython-310.pyc,, +sympy/physics/vector/tests/test_dyadic.py,sha256=09VKP_uSaiJny5LxNlkSMwU_LdQhZ6yGqoD1GG4dc2U,4292 +sympy/physics/vector/tests/test_fieldfunctions.py,sha256=FUjh18QzB6dXSau9iHutb36o28faSa7T9sB0icpja-M,5825 +sympy/physics/vector/tests/test_frame.py,sha256=sk4atyErDljoa9Q4YDDWoubBOxfkSXR3mKTmYAO_2vE,26102 +sympy/physics/vector/tests/test_functions.py,sha256=5gR01x9HlqM_DViSlu7Yf1m5NQWI2oqBe1a3dRkBcIc,20763 +sympy/physics/vector/tests/test_output.py,sha256=TFqso2YUb5zw4oX6H206Wu0XTwJZFKPY92gd68ktMN4,2631 +sympy/physics/vector/tests/test_point.py,sha256=B6Yk7K-ouyN-VBXycDJV4sOYrPyFf8a_Q-Ytx7vq1mo,12257 +sympy/physics/vector/tests/test_printing.py,sha256=kptiX3xy_xPSyg8f4xZ2jJnorynPvfTenOBtntsYXaY,10433 +sympy/physics/vector/tests/test_vector.py,sha256=Jm6DeizQxKY-CD7722--Ko073bcN4jJJ-geRoNkofs4,9458 +sympy/physics/vector/vector.py,sha256=o9Ov2GD6-_4eZwqpNkaB1DvCioSXAVtR0HFoRneNEEc,27533 +sympy/physics/wigner.py,sha256=4jYcv62gfHJGlJfYcbn06BFmNIs5JCiEBNnxUbg2Oyo,37605 +sympy/plotting/__init__.py,sha256=hAdOjai8-laj79rLJ2HZbiW1okXlz0p1ck-CoeNU6m8,526 +sympy/plotting/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/__pycache__/experimental_lambdify.cpython-310.pyc,, 
+sympy/plotting/__pycache__/plot.cpython-310.pyc,, +sympy/plotting/__pycache__/plot_implicit.cpython-310.pyc,, +sympy/plotting/__pycache__/textplot.cpython-310.pyc,, +sympy/plotting/experimental_lambdify.py,sha256=wIvB02vdrI-nEJX3TqInsf0v8705JI5lcVgMJsJbtO0,22879 +sympy/plotting/intervalmath/__init__.py,sha256=fQV7sLZ9NHpZO5XGl2ZfqX56x-mdq-sYhtWEKLngHlU,479 +sympy/plotting/intervalmath/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-310.pyc,, +sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-310.pyc,, +sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-310.pyc,, +sympy/plotting/intervalmath/interval_arithmetic.py,sha256=OibkI5I0i6_NpFd1HEl48d_R4PRWofUoOS4HYQBkVOc,15530 +sympy/plotting/intervalmath/interval_membership.py,sha256=1VpO1T7UjvPxcMySC5GhZl8-VM_DxIirSWC3ZGmxIAY,2385 +sympy/plotting/intervalmath/lib_interval.py,sha256=WY1qRtyub4MDJaZizw6cXQI5NMEIXBO9UEWPEI80aW8,14809 +sympy/plotting/intervalmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/plotting/intervalmath/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/__pycache__/test_interval_functions.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/__pycache__/test_interval_membership.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/__pycache__/test_intervalmath.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/test_interval_functions.py,sha256=gdIo5z54tIbG8hDaGd3I8rBDP67oetMZWWdM-uvt1ec,9862 +sympy/plotting/intervalmath/tests/test_interval_membership.py,sha256=D1KjcrLxAwOmDEUqA-8TCqkFWGtmeerR9KwmzS7tyjk,4216 +sympy/plotting/intervalmath/tests/test_intervalmath.py,sha256=ndBMczrs6xYMN5RGnyCL9yq7pNUxrXHTSU1mdUsp5tU,9034 +sympy/plotting/plot.py,sha256=eTKGJmFyTycCNb6CquLGutB9d92PdlllxW1Wn0W6Q-k,92139 +sympy/plotting/plot_implicit.py,sha256=2kRJ0YRrsDKad8Q34UXdy4lOVGKh6LvL6LokPVDZN8A,15683 +sympy/plotting/pygletplot/__init__.py,sha256=DM7GURQbdSfcddHz23MxOShatBFc26tP_sd3G8pGCQE,3732 +sympy/plotting/pygletplot/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/color_scheme.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/managed_window.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_axes.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_camera.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_controller.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_curve.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_interval.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_mode.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_mode_base.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_modes.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_object.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_rotation.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_surface.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_window.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/util.cpython-310.pyc,, +sympy/plotting/pygletplot/color_scheme.py,sha256=NgPUamkldygfrIPj0LvC_1AzhscVtg18FSudElvFYB8,12522 +sympy/plotting/pygletplot/managed_window.py,sha256=N7AKtM7ELfIJLie6zvI-J6-OQRBnMZu6AL1USz7hFEk,3072 +sympy/plotting/pygletplot/plot.py,sha256=s-5AJB0KelHs9WGoFIVIdYrOoMXfdpnM5-G2cF8xzDQ,13352 
+sympy/plotting/pygletplot/plot_axes.py,sha256=Q9YN8W0Hd1PeflHLvOvSZ-hxeLU4Kq3nUFLYDC0x0E8,8655 +sympy/plotting/pygletplot/plot_camera.py,sha256=yfkGg7TF3yPhhRUDhvPMT1uJgSboTwgAOtKOJdP7d8E,4001 +sympy/plotting/pygletplot/plot_controller.py,sha256=MroJJSPCbBDT8gGs_GdqpV_KHsllMNJpxx0MU3vKJV8,6941 +sympy/plotting/pygletplot/plot_curve.py,sha256=YwKA2lYC7IwCOQJaOVnww8AAG4P36cArgbC1iLV9OFI,2838 +sympy/plotting/pygletplot/plot_interval.py,sha256=doqr2wxnrED4MJDlkxQ07GFvaagX36HUb77ly_vIuKQ,5431 +sympy/plotting/pygletplot/plot_mode.py,sha256=Djq-ewVms_JoSriDpolDhhtttBJQdJO8BD4E0nyOWcQ,14156 +sympy/plotting/pygletplot/plot_mode_base.py,sha256=3z3WjeN7TTslHJevhr3X_7HRHPgUleYSngu6285lR6k,11502 +sympy/plotting/pygletplot/plot_modes.py,sha256=gKzJShz6OXa6EHKar8SuHWrELVznxg_s2d5IBQkkeYE,5352 +sympy/plotting/pygletplot/plot_object.py,sha256=qGtzcKup4It1CqZ2jxA7FnorCua4S9I-B_7I3SHBjcQ,330 +sympy/plotting/pygletplot/plot_rotation.py,sha256=K8MyudYRS2F-ku5blzkWg3q3goMDPUsXqzmHLDU2Uqc,1447 +sympy/plotting/pygletplot/plot_surface.py,sha256=C0q9tzDmxzC1IpWiNKY4llzcopx6dhotGOLpK1N9m3s,3803 +sympy/plotting/pygletplot/plot_window.py,sha256=5boC2Fkmk46-gWGqWzdTkPmTMNHHOpA0CnB9q946Hwc,4643 +sympy/plotting/pygletplot/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/plotting/pygletplot/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/pygletplot/tests/__pycache__/test_plotting.cpython-310.pyc,, +sympy/plotting/pygletplot/tests/test_plotting.py,sha256=NisjR-yuBRJfQvjcb20skTR3yid2U3MhKHW6sy8RE10,2720 +sympy/plotting/pygletplot/util.py,sha256=mzQQgDDbp04B03KyJrossLp8Yq72RJzjp-3ArfjbMH8,4621 +sympy/plotting/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/plotting/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_experimental_lambdify.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_plot.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_plot_implicit.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_textplot.cpython-310.pyc,, +sympy/plotting/tests/test_experimental_lambdify.py,sha256=EYshdXA5tAGWolaDX-nHAolp7xIJN4Oqb1Uc1C1IhJI,3127 +sympy/plotting/tests/test_plot.py,sha256=HWledOPr2xKq3XFGr458Lc5c0wgf2e0IFa4j63bfdH0,25204 +sympy/plotting/tests/test_plot_implicit.py,sha256=gXXMvVCIlp3HeN12Ej636RnhNEmV3i5WnDA48rjRPOg,5804 +sympy/plotting/tests/test_region_and.png,sha256=EV0Lm4HtQPk_6eIWtPY4TPcQk-O7tkpdZIuLmFjGRaA,6864 +sympy/plotting/tests/test_region_not.png,sha256=3O_9_nPW149FMULEcT5RqI2-k2H3nHELbfJADt2cO8k,7939 +sympy/plotting/tests/test_region_or.png,sha256=5Bug09vyog-Cu3mky7pbtFjew5bMvbpe0ZXWsgDKfy4,8809 +sympy/plotting/tests/test_region_xor.png,sha256=kucVWBA9A98OpcR4did5aLXUyoq4z0O4C3PM6dliBSw,10002 +sympy/plotting/tests/test_textplot.py,sha256=VurTGeMjUfBLpLdoMqzJK9gbcShNb7f1OrAcRNyrtag,12761 +sympy/plotting/textplot.py,sha256=M3TEzIDV6l6CpMpPZcAVrO-Y_pYbRRCsbuPMGAaQEXs,4921 +sympy/polys/__init__.py,sha256=2ZG4bdqNChU1niEsfBNC57G9B51TLYxiDy5WG5_2kMc,5545 +sympy/polys/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/__pycache__/appellseqs.cpython-310.pyc,, +sympy/polys/__pycache__/compatibility.cpython-310.pyc,, +sympy/polys/__pycache__/constructor.cpython-310.pyc,, +sympy/polys/__pycache__/densearith.cpython-310.pyc,, +sympy/polys/__pycache__/densebasic.cpython-310.pyc,, +sympy/polys/__pycache__/densetools.cpython-310.pyc,, +sympy/polys/__pycache__/dispersion.cpython-310.pyc,, +sympy/polys/__pycache__/distributedmodules.cpython-310.pyc,, 
+sympy/polys/__pycache__/domainmatrix.cpython-310.pyc,, +sympy/polys/__pycache__/euclidtools.cpython-310.pyc,, +sympy/polys/__pycache__/factortools.cpython-310.pyc,, +sympy/polys/__pycache__/fglmtools.cpython-310.pyc,, +sympy/polys/__pycache__/fields.cpython-310.pyc,, +sympy/polys/__pycache__/galoistools.cpython-310.pyc,, +sympy/polys/__pycache__/groebnertools.cpython-310.pyc,, +sympy/polys/__pycache__/heuristicgcd.cpython-310.pyc,, +sympy/polys/__pycache__/modulargcd.cpython-310.pyc,, +sympy/polys/__pycache__/monomials.cpython-310.pyc,, +sympy/polys/__pycache__/multivariate_resultants.cpython-310.pyc,, +sympy/polys/__pycache__/orderings.cpython-310.pyc,, +sympy/polys/__pycache__/orthopolys.cpython-310.pyc,, +sympy/polys/__pycache__/partfrac.cpython-310.pyc,, +sympy/polys/__pycache__/polyclasses.cpython-310.pyc,, +sympy/polys/__pycache__/polyconfig.cpython-310.pyc,, +sympy/polys/__pycache__/polyerrors.cpython-310.pyc,, +sympy/polys/__pycache__/polyfuncs.cpython-310.pyc,, +sympy/polys/__pycache__/polymatrix.cpython-310.pyc,, +sympy/polys/__pycache__/polyoptions.cpython-310.pyc,, +sympy/polys/__pycache__/polyquinticconst.cpython-310.pyc,, +sympy/polys/__pycache__/polyroots.cpython-310.pyc,, +sympy/polys/__pycache__/polytools.cpython-310.pyc,, +sympy/polys/__pycache__/polyutils.cpython-310.pyc,, +sympy/polys/__pycache__/rationaltools.cpython-310.pyc,, +sympy/polys/__pycache__/ring_series.cpython-310.pyc,, +sympy/polys/__pycache__/rings.cpython-310.pyc,, +sympy/polys/__pycache__/rootisolation.cpython-310.pyc,, +sympy/polys/__pycache__/rootoftools.cpython-310.pyc,, +sympy/polys/__pycache__/solvers.cpython-310.pyc,, +sympy/polys/__pycache__/specialpolys.cpython-310.pyc,, +sympy/polys/__pycache__/sqfreetools.cpython-310.pyc,, +sympy/polys/__pycache__/subresultants_qq_zz.cpython-310.pyc,, +sympy/polys/agca/__init__.py,sha256=fahpWoG_0LgoqOXBnDBJS16Jj1fE1_VKG7edM3qZ2HE,130 +sympy/polys/agca/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/agca/__pycache__/extensions.cpython-310.pyc,, +sympy/polys/agca/__pycache__/homomorphisms.cpython-310.pyc,, +sympy/polys/agca/__pycache__/ideals.cpython-310.pyc,, +sympy/polys/agca/__pycache__/modules.cpython-310.pyc,, +sympy/polys/agca/extensions.py,sha256=v3VmKWXQeyPuwNGyizfR6ZFb4GkRZ97xREHawuLWqpg,9168 +sympy/polys/agca/homomorphisms.py,sha256=gaMNV96pKUuYHZ8Bd7QOs27J1IbbJgkEjyWcTLe8GFI,21937 +sympy/polys/agca/ideals.py,sha256=8rh6iQt26zF0qKzHlfqGXKZzKuGY6Y5t9hBNVGG9v5M,10891 +sympy/polys/agca/modules.py,sha256=UZBnmvsQTHRkSVGdst6nksp9a07ZYD65eArjL91n3-Q,46946 +sympy/polys/agca/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/agca/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_extensions.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_homomorphisms.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_ideals.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_modules.cpython-310.pyc,, +sympy/polys/agca/tests/test_extensions.py,sha256=i3IHQNXQByFMCvjjyd_hwwJSCiUj0z1rRwS9WFK2AFc,6455 +sympy/polys/agca/tests/test_homomorphisms.py,sha256=m0hFmcTzvZ8sZbbnWeENwzKyufpE9zWwZR-WCI4kdpU,4224 +sympy/polys/agca/tests/test_ideals.py,sha256=w76qXO-_HN6LQbV7l3h7gJZsM-DZ2io2X-kPWiHYRNw,3788 +sympy/polys/agca/tests/test_modules.py,sha256=HdfmcxdEVucEbtfmzVq8i_1wGojT5b5DE5VIfbTMx3k,13552 +sympy/polys/appellseqs.py,sha256=hWeDKsKnJuAuPN_5IU6m1okurAq9xMt3LQgMehcvBKQ,8305 +sympy/polys/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+sympy/polys/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/benchmarks/__pycache__/bench_galoispolys.cpython-310.pyc,, +sympy/polys/benchmarks/__pycache__/bench_groebnertools.cpython-310.pyc,, +sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc,, +sympy/polys/benchmarks/bench_galoispolys.py,sha256=8RtN9ZQga2oxscVPPkMGB29Dz8UbskMS2szYtqZ69u0,1502 +sympy/polys/benchmarks/bench_groebnertools.py,sha256=YqGDCzewRszCye_GnneDXMRNB38ORSpVu_Jn0ELIySo,803 +sympy/polys/benchmarks/bench_solvers.py,sha256=gLrZguh6pE0E4_vM2GeOS5bHnrcSUQXqD0Qz9tItfmo,446778 +sympy/polys/compatibility.py,sha256=OkpZiIrD2u_1YB7dE2NJmhpt1UZoBNoX2JBY3q1Uixo,57743 +sympy/polys/constructor.py,sha256=4hqADMZrcLOsnzVebcZxnn3LJ7HdPIHReq0Qalf91EY,11371 +sympy/polys/densearith.py,sha256=6lkYHNpTPp2qq8qKBNiK9V-xNqLg0MYcoi_ksKaNBcg,34108 +sympy/polys/densebasic.py,sha256=H9DimmE5zLuEpzyYvTWBViBJTe5bbLj-1RefaAy2XXk,35922 +sympy/polys/densetools.py,sha256=q75QA1e0rH9TpVbTGIwRgeisNFt-7HiRcdPUEdHYN2E,25902 +sympy/polys/dispersion.py,sha256=s6GIYnGA6U9jhGP7YXQQS8G3byG4-kPbr55BR6p-iz4,5740 +sympy/polys/distributedmodules.py,sha256=t8pLIgDQs_dMecGXwybVYoLavofEy2DXhFS8N5gj5SU,21827 +sympy/polys/domainmatrix.py,sha256=FmNqklNFQR1WrQYtP2r7jypw2IQadNKGP14EaUaxUqI,310 +sympy/polys/domains/__init__.py,sha256=T6qPNkU1EJ6D5BnvyJSXJv4zeJ5MUT5RLsovMkkXS9E,1872 +sympy/polys/domains/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/domains/__pycache__/algebraicfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/characteristiczero.cpython-310.pyc,, +sympy/polys/domains/__pycache__/complexfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/compositedomain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/domain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/domainelement.cpython-310.pyc,, +sympy/polys/domains/__pycache__/expressiondomain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/expressionrawdomain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/field.cpython-310.pyc,, +sympy/polys/domains/__pycache__/finitefield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/fractionfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gaussiandomains.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gmpyfinitefield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gmpyintegerring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gmpyrationalfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/groundtypes.cpython-310.pyc,, +sympy/polys/domains/__pycache__/integerring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/modularinteger.cpython-310.pyc,, +sympy/polys/domains/__pycache__/mpelements.cpython-310.pyc,, +sympy/polys/domains/__pycache__/old_fractionfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/old_polynomialring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/polynomialring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonfinitefield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonintegerring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonrational.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonrationalfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/quotientring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/rationalfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/realfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/ring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/simpledomain.cpython-310.pyc,, +sympy/polys/domains/algebraicfield.py,sha256=hg2F7SBrc0I-uqRa90ehtHiF6bCo_AB98XDHRRcGFZw,21556 
+sympy/polys/domains/characteristiczero.py,sha256=vHYRUXPrfJzDF8wrd1KSFqG8WzwfITP_eweA-SHPVYA,382 +sympy/polys/domains/complexfield.py,sha256=2GjeNMebTXxLHDkKYqbrP-hZqBXHoc_Uv7kk7xIyPcw,4620 +sympy/polys/domains/compositedomain.py,sha256=wgw_yKwC5gHYWxRHEbVDeHOKQycFkZH0ZxhVES0AR04,1042 +sympy/polys/domains/domain.py,sha256=KOj3-sDzLox86n3Av2Vl6nExWszyWXkJz0-lDpXDwJ4,38006 +sympy/polys/domains/domainelement.py,sha256=IrG-Mzv_VlCAmE-hmJVH_d77TrsfyaGGfJVmU8FFvlY,860 +sympy/polys/domains/expressiondomain.py,sha256=rk2Vky-C5sQiOtkWbtxh1s5_aOALGCREzq-R6qxVZ-I,6924 +sympy/polys/domains/expressionrawdomain.py,sha256=cXarD2jXi97FGNiqNiDqQlX0g764EW2M1PEbrveImnY,1448 +sympy/polys/domains/field.py,sha256=tyOjEqABaOXXkaBEL0qLqyG4g5Ktnd782B_6xTCfia8,2591 +sympy/polys/domains/finitefield.py,sha256=yFU8-FvoDxGQ9Yo-mKlOqnB-91ctpz_TT0zLRmx-iQI,6025 +sympy/polys/domains/fractionfield.py,sha256=pKR3dfOOXqBIwf3jvRnaqgA-t1YYWdubCuz3yNnxepU,5945 +sympy/polys/domains/gaussiandomains.py,sha256=qkbqSXzumxwQq7QGAyvNsgJZlzF5MbvN2O9nz2li-kQ,17975 +sympy/polys/domains/gmpyfinitefield.py,sha256=C_Nd9GubSMBJmIe5vs_C2IuBT8YGFL4xgK4oixNCOrk,444 +sympy/polys/domains/gmpyintegerring.py,sha256=U6Ph1_5Ez5bXN4JcF2Tsq1FUDEwYsGx0nUT-gZDvO5U,3017 +sympy/polys/domains/gmpyrationalfield.py,sha256=dZjrfcWaUA-BHUtutzLOWPlOSNLYzBqSFeukER6L_bA,3178 +sympy/polys/domains/groundtypes.py,sha256=bHPHdmpFRBWe86TNMSsE6m5grvE0bQWLWnRGRBBxMpQ,1615 +sympy/polys/domains/integerring.py,sha256=T2MvIiEI3OPFoOQ5Ep3HgZhNU1evP-Wxu0oDVG7oJa8,6085 +sympy/polys/domains/modularinteger.py,sha256=bAUskiiX1j-n9SLx79jUCPOuO9mDNbzUcuijRcI7Hg4,5094 +sympy/polys/domains/mpelements.py,sha256=MxymxwlGBA3Px2FFyzISEtAnkVoxeq-bJM1fk2jkEts,4616 +sympy/polys/domains/old_fractionfield.py,sha256=6qVb4Zzfq8ArxDyghXwW5Vvw4SattdIt0HUx4WcnD8U,6178 +sympy/polys/domains/old_polynomialring.py,sha256=_Rengtf5vN3w9GJAsDFcN3yKbWjYqkTbsPdxbtbplnE,14914 +sympy/polys/domains/polynomialring.py,sha256=kStXSAtq1b5Tk3vrEze7_E8UMn8bF91Goh7hVzhtax0,6153 +sympy/polys/domains/pythonfinitefield.py,sha256=RYwDRg1zVLLGtJvVXvWhwUZjC91g8pXTwAjuQoWezks,460 +sympy/polys/domains/pythonintegerring.py,sha256=qUBqWBtP_faY-m2tJA07JQyCTdh27tXVBDD7vsKNUn4,2929 +sympy/polys/domains/pythonrational.py,sha256=M3VUGODh3MLElePjYtjt9b02ReMThw-XXpuQTkohgNs,548 +sympy/polys/domains/pythonrationalfield.py,sha256=x8BPkGKj0WPuwJzN2py5l9aAjHaY4djv65c4tzUTr3Y,2295 +sympy/polys/domains/quotientring.py,sha256=LBUIIpN3y3QPS6pFYWwqpca5ShoWDyaZbZ6PwDm_SmA,5866 +sympy/polys/domains/rationalfield.py,sha256=-4rLYoh3IhsURx09OtLR3A29NLDi_RO-QzWO3RGoy8Q,4869 +sympy/polys/domains/realfield.py,sha256=Wt5_y7HTDe8u1qGalhNhTT7Rw3CQiVkmgduQ7jcpD9c,3782 +sympy/polys/domains/ring.py,sha256=p66U2X58acSHLHxOTU6aJZ0Umdcu1qiGIUDtV8iJCD0,3236 +sympy/polys/domains/simpledomain.py,sha256=_K-Zz8Opf505r3eHSrbPAlnGiGSjY_O4Cwa4OTeOSoY,369 +sympy/polys/domains/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/domains/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/domains/tests/__pycache__/test_domains.cpython-310.pyc,, +sympy/polys/domains/tests/__pycache__/test_polynomialring.cpython-310.pyc,, +sympy/polys/domains/tests/__pycache__/test_quotientring.cpython-310.pyc,, +sympy/polys/domains/tests/test_domains.py,sha256=1PsckHIBXMQFm-sgSDMjiUor2c-000iEZhqqPV9pfR4,43846 +sympy/polys/domains/tests/test_polynomialring.py,sha256=gW82jcxL2J5nKrA4iDCuk88K1bqpfAG7z32Y9191mKU,3312 +sympy/polys/domains/tests/test_quotientring.py,sha256=BYoq1CqI76RDSm0xQdp1v7Dv1n5sdcmes-b_y_AfW-0,1459 
+sympy/polys/euclidtools.py,sha256=h8qC0ZsXf-ZKPLIMBaLV2aSCHDuXLQBczKZcU-J2BaE,41221 +sympy/polys/factortools.py,sha256=AghhwHVn_wJsEBBo-THmMIKT9zr-gBJlkLTctJrT_eY,38457 +sympy/polys/fglmtools.py,sha256=KYZuP4CxAN3KP6If3hM53HKM4S87rNU2HecwbYjWfOE,4302 +sympy/polys/fields.py,sha256=HEXUOH-bhYkTTXyev87LZPsyK3-aeqCmGRgErFiJzhA,21245 +sympy/polys/galoistools.py,sha256=cuwAArjtyoV4wfaQtX8fs4mz4ZXLuc6yKvHObyXgnw8,52133 +sympy/polys/groebnertools.py,sha256=NhK-XcFR9e4chDDJJ-diXb7XYuw9zcixFA_riomThPM,23342 +sympy/polys/heuristicgcd.py,sha256=rD3intgKCtAAMH3sqlgqbJL1XSq9QjfeG_MYzwCOek0,3732 +sympy/polys/matrices/__init__.py,sha256=ZaPJMi8l22d3F3rudS4NqzSt0xwxbs3uwnQwlhhR91o,397 +sympy/polys/matrices/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/_typing.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/ddm.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/dense.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/domainmatrix.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/domainscalar.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/eigen.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/exceptions.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/linsolve.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/lll.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/normalforms.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/sdm.cpython-310.pyc,, +sympy/polys/matrices/_typing.py,sha256=ZMxO82uprk9lCq4ClHL-pg6_wOmmnLozg0sQhJrjbbk,319 +sympy/polys/matrices/ddm.py,sha256=a-NJkOmGtm0P8Y88e9frpxRwap-gGZluG07oDReeyTg,13586 +sympy/polys/matrices/dense.py,sha256=LcFY1OAEvIaXzdToD84VvU_DZmNwRSiZt3PA-6YCwMQ,8718 +sympy/polys/matrices/domainmatrix.py,sha256=KeXk7Q0vTweGAWZduZHo2u0RUl2g2EnPeCXgz-16vrQ,47889 +sympy/polys/matrices/domainscalar.py,sha256=zosOQfLeKsMpAv1sm-JHPneGmMTeELvAloNxKMkZ8Uo,3643 +sympy/polys/matrices/eigen.py,sha256=pvICWI8_r_usa0EFqlbz7I8ASzKMK2j2gn-65CmTSPU,2983 +sympy/polys/matrices/exceptions.py,sha256=ay3Lv21X3QqszysBN71xdr9KGQuC5kDBl90a2Sjx6pM,1351 +sympy/polys/matrices/linsolve.py,sha256=fuuS_NvFFw7vP7KEtkfursOtgJmnIWSv9PEZv56ovOE,7548 +sympy/polys/matrices/lll.py,sha256=8vWLPm3SaFDY5pAwawzb2paF29hmJBucVdxwqGEzcAk,3556 +sympy/polys/matrices/normalforms.py,sha256=SkrGcuvfi27Bb3UeU_HHtCU4HrPSZSz1Azh5p4TqZ68,13105 +sympy/polys/matrices/sdm.py,sha256=Y_GV0aMlJDDa452OA72EwxvwKQAA3NaZRGVRwqwbKTI,35571 +sympy/polys/matrices/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/matrices/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_ddm.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_dense.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_domainmatrix.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_domainscalar.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_eigen.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_linsolve.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_lll.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_normalforms.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_sdm.cpython-310.pyc,, +sympy/polys/matrices/tests/test_ddm.py,sha256=3tFhjkA1alE827Qiw9mAPlWkSgV3Sesrqeh-NxHXsA4,16640 +sympy/polys/matrices/tests/test_dense.py,sha256=Ig_SJ86pogur9AEfcetO_L01fy1WFhe-E9g9ngVTlxs,9483 +sympy/polys/matrices/tests/test_domainmatrix.py,sha256=IjRa6uCfAu1hm6XrN1fUUaAA2GeVxi5IgVaf4vZc4Lk,32371 
+sympy/polys/matrices/tests/test_domainscalar.py,sha256=9HQL95XlxyXHNDf_UBN9t1da_9syRNZGOb7IKkmjn-U,3624 +sympy/polys/matrices/tests/test_eigen.py,sha256=T1lYZeW-0NwDxDOG6ZJLr-OICfxY2wa0fVHV2V6EXSk,3200 +sympy/polys/matrices/tests/test_linsolve.py,sha256=G1LCDkB3BDUuDzQuUxn4jCjqUSbCwMX_lfkVXDLe-k0,3334 +sympy/polys/matrices/tests/test_lll.py,sha256=Zg7rNTlywHgrhr9OYpRj5yW6t2JPzJvwcclCvRNc7xw,6480 +sympy/polys/matrices/tests/test_normalforms.py,sha256=_4Cm3EJxHh3TEwF278uB7WQZweFWFsx3j0zc2AZFgDI,3036 +sympy/polys/matrices/tests/test_sdm.py,sha256=H0oNZkNmwpP8i6UpysnkD7yave0E3YU3Z8dKGobSbOA,14000 +sympy/polys/modulargcd.py,sha256=vE57ZJv1iJNKHcRbFJBgG6Jytudweq3wyDB90yxtFCc,58664 +sympy/polys/monomials.py,sha256=R2o7vpjdZdpp57u-PrKw1REk_Cr9uoNcum1a8DnDHZg,18925 +sympy/polys/multivariate_resultants.py,sha256=G9NCKrb5MBoUshiB_QD86w6MwQAxLwOmc-_HFO_ZXdE,15265 +sympy/polys/numberfields/__init__.py,sha256=ZfhC9MyfGfGUz_DT_rXasB-M_P2zUiZXOJUNh_Gtm8c,538 +sympy/polys/numberfields/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/basis.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/exceptions.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/galois_resolvents.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/galoisgroups.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/minpoly.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/modules.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/primes.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/resolvent_lookup.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/subfield.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/utilities.cpython-310.pyc,, +sympy/polys/numberfields/basis.py,sha256=IPA6cSwz-53ClQwo-wkmRzfx9pRX4iBhiggdLMVSgJ0,8261 +sympy/polys/numberfields/exceptions.py,sha256=IN36PiHvWvH5YOtWmU0EHSPiKhGryPezcOawdQmesMo,1668 +sympy/polys/numberfields/galois_resolvents.py,sha256=iGuCtXU5ZsoyHZVIbj7eh3ry_zhdAtUaV30Df7pT8WM,24858 +sympy/polys/numberfields/galoisgroups.py,sha256=_ORI7MYUyWhBuDsRL9W0olW5piJLkRNFsbRoJPPkryk,20665 +sympy/polys/numberfields/minpoly.py,sha256=uMMy3Ddui5_oNUBS55JNLF5xAZywfJzUjINmWRw3_EU,27716 +sympy/polys/numberfields/modules.py,sha256=pK69MtEb5BcrSWU9E9jtpVxGhEcR-5XB8_qatpskFVk,69117 +sympy/polys/numberfields/primes.py,sha256=9UHrJrIDPhAcNtqrDcqXIm9Z-Ch69W_gKGOBfDKduro,23967 +sympy/polys/numberfields/resolvent_lookup.py,sha256=qfLNKOz_WjtXwpVlfzy8EkD4gw12epx9npE9HsjyIdg,40411 +sympy/polys/numberfields/subfield.py,sha256=_s8u4a1y1L4HhoKEpoemSvNrXdW0Mh4YvrUOozq_lvc,16480 +sympy/polys/numberfields/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/numberfields/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_basis.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_galoisgroups.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_minpoly.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_modules.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_numbers.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_primes.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_subfield.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_utilities.cpython-310.pyc,, +sympy/polys/numberfields/tests/test_basis.py,sha256=96BJ7e4oPDKXyvlRrUkiQxmHyjRGpOkAC7R3ln-jgNE,4580 +sympy/polys/numberfields/tests/test_galoisgroups.py,sha256=3LFuMbV92VBFlqqEqjh37oQvmG8cgZ0pFxDCXUoYRL4,5036 
+sympy/polys/numberfields/tests/test_minpoly.py,sha256=IA0WH56vMXbSQpiml78jZes1M1XZSHDRARv5tM4VGTQ,22590 +sympy/polys/numberfields/tests/test_modules.py,sha256=GU4166j_hMlB22uWxxIjV_ON8RsyvpaN7Ly3eK8_m8Y,22926 +sympy/polys/numberfields/tests/test_numbers.py,sha256=M0vZIBnjPBHV4vFUnPBILaqiR_cgSuU50kFB-v7l1gA,5988 +sympy/polys/numberfields/tests/test_primes.py,sha256=JhcAkaQMgjkOSziQ2jZApJ8b8oviil5cUy0hfFqNmZg,9779 +sympy/polys/numberfields/tests/test_subfield.py,sha256=_aCbvukrahv-QyCwNT7EpTYC1u53yUlMhfGqV5GzW3Y,12215 +sympy/polys/numberfields/tests/test_utilities.py,sha256=T3YfFouXZNcBG2AfLEQ77Uqy-_TTufGTUsysmzUHNuA,3655 +sympy/polys/numberfields/utilities.py,sha256=aQBm_rgKxjHOCTktOYJ-aI5Cpb59IBvWJiyZCowcM-I,13081 +sympy/polys/orderings.py,sha256=IFieyj4LkFa7NDiGTZD3VwUY7mSN3GEjThKk0z5WJ1s,8500 +sympy/polys/orthopolys.py,sha256=Kjx3fSoLDpX-bXUlgkPQdOK_TutIidI0MHmJ-6cviKM,8526 +sympy/polys/partfrac.py,sha256=KzReYNMyYfgXUM-UFj67eQU7MQk6EsbfhVuf4_Tl_u0,14665 +sympy/polys/polyclasses.py,sha256=byf1JS2pYGCZXGvzaxnBC18r--jTf0OFqOjJxWy6z_U,54564 +sympy/polys/polyconfig.py,sha256=mgfFpp9SU159tA_PM2o04WZyzMoWfOtWZugRcHnP42c,1598 +sympy/polys/polyerrors.py,sha256=xByI-fqIHVYsYRm63NmHXlSSRCwSI9vZUoO-1Mf5Wlk,4744 +sympy/polys/polyfuncs.py,sha256=OEZpdYeHQADBJYqMw8JAyN4sw-jsJ6lzVH6m-CCoK8g,8547 +sympy/polys/polymatrix.py,sha256=83_9L66dbzVv0UfbPR3OTKtxZZ6sMaeOifMBPUDBeiM,9749 +sympy/polys/polyoptions.py,sha256=BqXFyhKVDoFRJlSSBb_jxOkWPzM2MpQ67BKiQR852A8,21721 +sympy/polys/polyquinticconst.py,sha256=mYLFWSBq3H3Y0I8cx76Z_xauLx1YeViC4xF6yWsSTPQ,96035 +sympy/polys/polyroots.py,sha256=etxwQFngxSLRgjRJ8AzPc28CCQm56xx9CRlp4MPwhl4,36995 +sympy/polys/polytools.py,sha256=H8xrnAGUu8Df_HStGD2wVpI-cKOhqEYlEECJ9ep3PHM,194263 +sympy/polys/polyutils.py,sha256=gGwRUZXAFv132f96uONc6Ybfh8xyyP9pAouNY6fX-uQ,16519 +sympy/polys/rationaltools.py,sha256=gkLu0YvsSJ2b04AOK7MV_rjp1m6exLkdqClOjrbBboo,2848 +sympy/polys/ring_series.py,sha256=qBKirsiZpM5x0ix4V5ntm7inynnahYCfVSgHZRCpccc,57766 +sympy/polys/rings.py,sha256=rparZxHTHV9j7Av3XUnAE2CSn1WglhXveO13IcuDljE,72970 +sympy/polys/rootisolation.py,sha256=vOvKe1Vi2uklmMB4qNy_EczSRzelMUqPB3o7qYdiWR0,64527 +sympy/polys/rootoftools.py,sha256=_rwgSXUkgg0bUsp949GiSz6ouoxuyysclg-fKGxRlYA,41040 +sympy/polys/solvers.py,sha256=CWrzPJNlosjhxScXzIHYZQwCjsLnkAgAeIgYrY92gbc,13519 +sympy/polys/specialpolys.py,sha256=B2vijl75zgUKUTY1HCqjB9BTDFf3FM8ugwkKGTB83XA,11038 +sympy/polys/sqfreetools.py,sha256=2Gdv9t9TNgdbnc-7XrpEhgYJfSvacHUyuE1aOWo9DXU,11464 +sympy/polys/subresultants_qq_zz.py,sha256=TDVS9-rEBXK88m4mAixuvPFMAXmn3MwKaSsGmq9oUCo,88261 +sympy/polys/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_appellseqs.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_constructor.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_densearith.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_densebasic.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_densetools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_dispersion.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_distributedmodules.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_euclidtools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_factortools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_fields.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_galoistools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_groebnertools.cpython-310.pyc,, 
+sympy/polys/tests/__pycache__/test_heuristicgcd.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_injections.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_modulargcd.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_monomials.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_multivariate_resultants.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_orderings.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_orthopolys.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_partfrac.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyclasses.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyfuncs.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polymatrix.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyoptions.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyroots.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polytools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyutils.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_pythonrational.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rationaltools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_ring_series.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rings.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rootisolation.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rootoftools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_solvers.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_specialpolys.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_sqfreetools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_subresultants_qq_zz.cpython-310.pyc,, +sympy/polys/tests/test_appellseqs.py,sha256=YTERuRr30QtfxYR0erXvJG8D-INe9RaMFAF0ZM-H4Ks,3820 +sympy/polys/tests/test_constructor.py,sha256=U1LBjA881oG4A8oMXqZe0sZ42pmH7YpR_VSJjBNZz-w,6378 +sympy/polys/tests/test_densearith.py,sha256=1YBmEJTtPRWj4l39HMkFD6ffkU8h3pIs7lz-k_9XGYk,40428 +sympy/polys/tests/test_densebasic.py,sha256=vcoTscGRB1bef9UhclHcsKnBJp9baexjQ-enXq1-pKM,21477 +sympy/polys/tests/test_densetools.py,sha256=QM1Yt0hOHBnUTvdn14aFRUdfMQE9P2q1Hpzeud-n-ds,24572 +sympy/polys/tests/test_dispersion.py,sha256=8JfwjSNy7X74qJODMaVp1GSLprFiRDVt6XrYc_-omgQ,3183 +sympy/polys/tests/test_distributedmodules.py,sha256=dXmjhozX5Yzb7DsrtbdFTqAxi9Z1UZNJvGxj-vHM7cM,7639 +sympy/polys/tests/test_euclidtools.py,sha256=vEyj48eIjm6-KRQtThNfI4ic_VDNB6l7jMouxJAF9HE,19482 +sympy/polys/tests/test_factortools.py,sha256=MXOJfhjrLAu-UCyXg6YRMYAc7nkw6SAfkY66_RKG9Es,24560 +sympy/polys/tests/test_fields.py,sha256=vrdg27319R3Zro_idhQVxIeomN9P6mU3jHyX7HZKeMU,10245 +sympy/polys/tests/test_galoistools.py,sha256=btKRaqckjvyGOhCvIfwLtRDVG2Qiwo6CTnoPW8h4S9E,28130 +sympy/polys/tests/test_groebnertools.py,sha256=ZWHBcCCOVNwDxuJWg1WPo0krTHx1m1wTPi2cOYPsAT4,18584 +sympy/polys/tests/test_heuristicgcd.py,sha256=wsAKgOKuLYra14qMS8EUt_Pda_SoBfP90X2-Tv1WG7A,4031 +sympy/polys/tests/test_injections.py,sha256=EONGggBUNWaVSwi817CzLBYJgkTehFq8-m-Qdqes984,1286 +sympy/polys/tests/test_modulargcd.py,sha256=GE-24EnWOAQVYwgBb5PJzySX6EEJQs-q3HRFBWsXkTE,9042 +sympy/polys/tests/test_monomials.py,sha256=bY057IDFyVs864jcJ46ZITLv57xMKNfBVwBC-mnzJLA,10988 +sympy/polys/tests/test_multivariate_resultants.py,sha256=DJu8CcZ3xwx8njpjDeSOyhyxeqZYmhfb7dkSCU-ll7Y,9501 +sympy/polys/tests/test_orderings.py,sha256=bdsIsqJTFJCVyZNRMAGVDXVk79ldw9rmAGejS_lwKP0,4254 +sympy/polys/tests/test_orthopolys.py,sha256=UpJwPlmqZ3IZtWhaLcfhR5EyKj49_VpruRlI2dK_Awk,6379 
+sympy/polys/tests/test_partfrac.py,sha256=78xlrvzvON2047j_DeQ0E8BBZg6Z1koJzksj5rQah9A,7096 +sympy/polys/tests/test_polyclasses.py,sha256=uUjLcfKrfW-EBB6N9ofESJgw4_QacKWN1fLa0etn6iY,13321 +sympy/polys/tests/test_polyfuncs.py,sha256=VbgCgCRE06dtSY9I9GSdPH9T52ETYYoxk4J3N1WBtd4,4520 +sympy/polys/tests/test_polymatrix.py,sha256=pl2VrN_d2XGOVHvvAnaNQzkdFTdQgjt9ePgo41soBRs,7353 +sympy/polys/tests/test_polyoptions.py,sha256=z9DUdt8K3lYkm4IyLH1Cv-TKe76HP-EyaRkZVsfWb6U,12416 +sympy/polys/tests/test_polyroots.py,sha256=LUh1A92dy93Ou2t2_650ujTqvC3DQK0qpl3QO7VZCrk,26809 +sympy/polys/tests/test_polytools.py,sha256=855XWTO3k68OALdT-PpsZ8ZfQepTsUEhDxU8dYyF1SE,126200 +sympy/polys/tests/test_polyutils.py,sha256=Qs3QQl0WYmTnkYE2ovTxdLeu6DYnWO_OoUmLwNDZzSw,11547 +sympy/polys/tests/test_pythonrational.py,sha256=vYMlOTuYvf-15P0nKTFm-uRrhUc-nCFEkqYFAPLxg08,4143 +sympy/polys/tests/test_rationaltools.py,sha256=wkvjzNP1IH-SdubNk5JJ7OWcY-zNF6z3t32kfp9Ncs0,2397 +sympy/polys/tests/test_ring_series.py,sha256=SCUiciL10XGGjxFuM6ulzA460XAUVRykW3HLb8RNsc0,24662 +sympy/polys/tests/test_rings.py,sha256=g3hl2fMJ6-X7-k9n3IBdOAtyqONbjYwTizlrFpWTR4M,45393 +sympy/polys/tests/test_rootisolation.py,sha256=x-n-T-Con-8phelNa05BPszkC_UCW1C0yAOwz658I60,32724 +sympy/polys/tests/test_rootoftools.py,sha256=psVf3YA1MMkeuVvn-IpmF_rc3AEhh8U4U09h6dEY9u0,21531 +sympy/polys/tests/test_solvers.py,sha256=LZwjEQKKpFdCr4hMaU0CoN650BqU-arsACJNOF7lOmk,13655 +sympy/polys/tests/test_specialpolys.py,sha256=vBEDCC82ccGvxsETR5xr3yQ70Ho_HUqv1Q970vWf44M,4995 +sympy/polys/tests/test_sqfreetools.py,sha256=QJdMLVvQOiPm8ZYr4OESV71d5Ag9QcK1dMUkYv3pY5o,4387 +sympy/polys/tests/test_subresultants_qq_zz.py,sha256=ro6-F0vJrR46syl5Q0zuXfXQzEREtlkWAeRV9xJE31Y,13138 +sympy/printing/__init__.py,sha256=ws2P2KshXpwfnij4zaU3lVzIFQOh7nSjLbrB50cVFcU,2264 +sympy/printing/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/__pycache__/aesaracode.cpython-310.pyc,, +sympy/printing/__pycache__/c.cpython-310.pyc,, +sympy/printing/__pycache__/codeprinter.cpython-310.pyc,, +sympy/printing/__pycache__/conventions.cpython-310.pyc,, +sympy/printing/__pycache__/cxx.cpython-310.pyc,, +sympy/printing/__pycache__/defaults.cpython-310.pyc,, +sympy/printing/__pycache__/dot.cpython-310.pyc,, +sympy/printing/__pycache__/fortran.cpython-310.pyc,, +sympy/printing/__pycache__/glsl.cpython-310.pyc,, +sympy/printing/__pycache__/gtk.cpython-310.pyc,, +sympy/printing/__pycache__/jscode.cpython-310.pyc,, +sympy/printing/__pycache__/julia.cpython-310.pyc,, +sympy/printing/__pycache__/lambdarepr.cpython-310.pyc,, +sympy/printing/__pycache__/latex.cpython-310.pyc,, +sympy/printing/__pycache__/llvmjitcode.cpython-310.pyc,, +sympy/printing/__pycache__/maple.cpython-310.pyc,, +sympy/printing/__pycache__/mathematica.cpython-310.pyc,, +sympy/printing/__pycache__/mathml.cpython-310.pyc,, +sympy/printing/__pycache__/numpy.cpython-310.pyc,, +sympy/printing/__pycache__/octave.cpython-310.pyc,, +sympy/printing/__pycache__/precedence.cpython-310.pyc,, +sympy/printing/__pycache__/preview.cpython-310.pyc,, +sympy/printing/__pycache__/printer.cpython-310.pyc,, +sympy/printing/__pycache__/pycode.cpython-310.pyc,, +sympy/printing/__pycache__/python.cpython-310.pyc,, +sympy/printing/__pycache__/rcode.cpython-310.pyc,, +sympy/printing/__pycache__/repr.cpython-310.pyc,, +sympy/printing/__pycache__/rust.cpython-310.pyc,, +sympy/printing/__pycache__/smtlib.cpython-310.pyc,, +sympy/printing/__pycache__/str.cpython-310.pyc,, +sympy/printing/__pycache__/tableform.cpython-310.pyc,, 
+sympy/printing/__pycache__/tensorflow.cpython-310.pyc,, +sympy/printing/__pycache__/theanocode.cpython-310.pyc,, +sympy/printing/__pycache__/tree.cpython-310.pyc,, +sympy/printing/aesaracode.py,sha256=aVXDMh_YDRsDwPbZMt8X73jjv4DW8g15M1M4TdNlqXQ,18227 +sympy/printing/c.py,sha256=dQ2ucrIGZGgYB6hS4gLIzFKDEYpfABNbP54lS7H6AIQ,26942 +sympy/printing/codeprinter.py,sha256=RkV88Z-SSCGkWJXuc_7pe2zoB-hRheBtJDDPEyK5acQ,35350 +sympy/printing/conventions.py,sha256=k6YRWHfvbLHJp1uKgQX-ySiOXSsXH8QJxC9fymYmcSM,2580 +sympy/printing/cxx.py,sha256=CtkngKi4o_z5XMbmzpa1eC1uUR9SCbuOIli9Zsnh4Rc,5737 +sympy/printing/defaults.py,sha256=YitLfIRfFH8ltNd18Y6YtBgq5H2te0wFKlHuIO4cvo8,135 +sympy/printing/dot.py,sha256=W0J798ZxBdlJercffBGnNDTp7J2tMdIYQkE_KIiyi3s,8274 +sympy/printing/fortran.py,sha256=JeDXvo6dL0-yG2nk9oiTmgBiWJZrjeZURsMcrFuSayo,28568 +sympy/printing/glsl.py,sha256=fYURb8NYRAxmbMQleFs-X2IWQ7uk5xHkJVhgskrFsbU,20537 +sympy/printing/gtk.py,sha256=ptnwYxJr5ox3LG4TCDbRIgxsCikaVvEzWBaqIpITUXc,466 +sympy/printing/jscode.py,sha256=EkGUqMH3qBAbLVbSSuYi4ZQ89G4xUImDT2nTAf3nn9E,12131 +sympy/printing/julia.py,sha256=iJqOPrHhqJjAc6UnT_8R7A5NFcn6ImE3mOTLS7X0bUY,23553 +sympy/printing/lambdarepr.py,sha256=BCx4eSdG8MQ8ZSUV1lWEd3CzbZ4IiMid-TTxPoV6FHU,8305 +sympy/printing/latex.py,sha256=ImSA8Ri3-30szn-FgMC4xTkrjnq9qlGisUhZtUiTyYE,121722 +sympy/printing/llvmjitcode.py,sha256=wa32lF5254AOPnbV9F5OvQTd1HOk0rfN-HUekcN1HmI,17164 +sympy/printing/maple.py,sha256=yEGhEsE_WkG4M6PpRdURw-FbsG-eVLL8d2-d3CUpkHk,10588 +sympy/printing/mathematica.py,sha256=9R-wXu1SR7Rp5hDFHdrRA0CPpADI58qeGoSxbAMpYP0,12701 +sympy/printing/mathml.py,sha256=BZNSIr05Hf3i2qBeNq0rGGEtHsChD2p8lfqg6GpRU5M,75290 +sympy/printing/numpy.py,sha256=X-MKcpT1u6Z6qaFKs6N17TQnzZMaeSMeKpJEru6Mhvo,19776 +sympy/printing/octave.py,sha256=31BmnCU-CCqllApOBJp5EPQCRO7hjU7hvYTqYxerPYg,25621 +sympy/printing/precedence.py,sha256=dK6ueqV6OOXg0qY9L-goOgbQarqVRygIYK5FQGTBPR8,5268 +sympy/printing/pretty/__init__.py,sha256=pJTe-DO4ctTlnjg1UvqyoeBY50B5znFjcGvivXRhM2U,344 +sympy/printing/pretty/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/pretty/__pycache__/pretty.cpython-310.pyc,, +sympy/printing/pretty/__pycache__/pretty_symbology.cpython-310.pyc,, +sympy/printing/pretty/__pycache__/stringpict.cpython-310.pyc,, +sympy/printing/pretty/pretty.py,sha256=Yom39Yqxqb7mO0FxSRqsOmxSUvrwCaORdE4e_78YGIk,105281 +sympy/printing/pretty/pretty_symbology.py,sha256=nfBI-cLYLBP9VuZxb7DSWtFIg3vgDphNfV-uBtFDMIE,20208 +sympy/printing/pretty/stringpict.py,sha256=NuWPIg1wLFMu39Cxf09pgVKix_oY7zAWrPOBWVd_5Jc,19097 +sympy/printing/pretty/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/printing/pretty/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc,, +sympy/printing/pretty/tests/test_pretty.py,sha256=IC7BOUZ01_-WrqBvn3nEAL89UezKzucS8dNDAvDzAHY,184797 +sympy/printing/preview.py,sha256=FwN0_q52iU6idLNZNXo002gPNpVw_9xrxLifFnK_ssw,14104 +sympy/printing/printer.py,sha256=0-hGTS9IPEqqP3s2sW7cZWyBe6opGa1FzyIRhND6FkA,14479 +sympy/printing/pycode.py,sha256=L6SbgH4ulnqTKVvAUtaKCATX4XYLNK-rs2UAgVe-1Rw,24290 +sympy/printing/python.py,sha256=sJcUWJYaWX41EZVkhUmZqpLA2ITcYU65Qd1UKZXMdFo,3367 +sympy/printing/rcode.py,sha256=mgWYYacqkLiBblV60CRH1G6FC9FkZ0LOfAYs1NgxOHA,14282 +sympy/printing/repr.py,sha256=p9G_EeK2WkI__6LFEtWyL1KFHJLL1KTFUJsp7N5n6vk,11649 +sympy/printing/rust.py,sha256=OD9xYBoTk-yRhhtbCaxyceg1lsnCaUclp_NWW4uaNYY,21377 +sympy/printing/smtlib.py,sha256=sJ0-_Ns2vH45b5oEXIPJtIOG9lvCEqHlJRQzQoiVC44,19445 
+sympy/printing/str.py,sha256=OEX6W7wBj1aJIiq39qFxstyWJxkAp08RzOLolXObeIM,33260 +sympy/printing/tableform.py,sha256=-1d1cwmnprJKPXpViTbQxpwy3wT7K8KjPD5HCyjbDGk,11799 +sympy/printing/tensorflow.py,sha256=KHdJMHMBOaJkHO8_uBfYRHeBW2VIziv_YYqIV30D-dA,7906 +sympy/printing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/printing/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_aesaracode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_c.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_codeprinter.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_conventions.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_cupy.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_cxx.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_dot.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_fortran.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_glsl.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_gtk.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_jax.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_jscode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_julia.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_lambdarepr.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_latex.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_llvmjit.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_maple.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_mathematica.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_mathml.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_numpy.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_octave.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_precedence.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_preview.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_pycode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_python.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_rcode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_repr.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_rust.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_smtlib.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_str.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_tableform.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_tensorflow.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_theanocode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_tree.cpython-310.pyc,, +sympy/printing/tests/test_aesaracode.py,sha256=s0pu_J7hfDJ4HttXP6cFM6fSUU1rHgga1SeAIdbNbAo,21016 +sympy/printing/tests/test_c.py,sha256=OrK5CxLbppwiOX2L-Whh9h7GC9XXueNkWhhF5ODaCnA,30804 +sympy/printing/tests/test_codeprinter.py,sha256=Bdh1RcusYzR7lTQ8s3Sik7zw_INivUcW2AS4dA0OCtg,1410 +sympy/printing/tests/test_conventions.py,sha256=yqPpU3F0WcbxImPBBAHd3YEZpkFGfcq_TLK4WN_gtP4,5257 +sympy/printing/tests/test_cupy.py,sha256=-hO52M1RJSQe0qSVSl6B1LudZIgaBMme0Nkd6dQGr6g,1858 +sympy/printing/tests/test_cxx.py,sha256=900VUfUpS55zfllYGQcpjdC4Wmcg4T8TV94Mr430NZc,2490 +sympy/printing/tests/test_dot.py,sha256=TSAtgGIgK_JbY-RMbQgUvnAI87SJqeJOqzcLjAobhKM,4648 +sympy/printing/tests/test_fortran.py,sha256=8L2zwZX8_QuNwcx24swcQUTvXYTO-5i-YrPL1hTRUVI,35518 +sympy/printing/tests/test_glsl.py,sha256=cfog9fp_EOFm_piJwqUcSvAIJ78bRwkFjecwr3ocCak,28421 +sympy/printing/tests/test_gtk.py,sha256=94gp1xRlPrFiALQGuqHnmh9xKrMxR52RQVkN0MXbUdA,500 
+sympy/printing/tests/test_jax.py,sha256=B5GVZV9UxKeOmb4lzJHDkQXRbWQiLLD7w7Ze3sDrWHQ,10536 +sympy/printing/tests/test_jscode.py,sha256=ObahZne9lQbBiXyJZLohjQGdHsG2CnWCFOB8KbFOAqQ,11369 +sympy/printing/tests/test_julia.py,sha256=U7R9zOckGWy99f5StDFE9lMXkcEmMkGHzYj1UM1xzgc,13875 +sympy/printing/tests/test_lambdarepr.py,sha256=YU_lAQpiNHKJpBjZmgXr-unzOwS6Ss-u8sS2D_u-Mq0,6947 +sympy/printing/tests/test_latex.py,sha256=m8UBxuluF0fEYoLSOMM79VtwhEzkqIiouu6vsaZ1G4c,135670 +sympy/printing/tests/test_llvmjit.py,sha256=EGPeRisM60_TIVgnk7PTLSm5F-Aod_88zLjHPZwfyZ8,5344 +sympy/printing/tests/test_maple.py,sha256=te2l-yWWfklFHnaw-F2ik8q2dqES2cxrnE1voJxMGL0,13135 +sympy/printing/tests/test_mathematica.py,sha256=vijg7xfoelywL-ZhNuXFfDjM1FgaW_4liTBx1wzpkWk,10954 +sympy/printing/tests/test_mathml.py,sha256=x4IckrMxOlSzt6CxGFpHdN2l6OXl7zrcxIHwn-KxeS8,96209 +sympy/printing/tests/test_numpy.py,sha256=7fGncgPzvUbSjtltsu-kwiCFPv9tJlv2zPLRFo3ZkNw,10360 +sympy/printing/tests/test_octave.py,sha256=xIFRIXtTHcuU6ZhBW8Ht_KjUPewJoCEQ0b5GVVRyP7g,18728 +sympy/printing/tests/test_precedence.py,sha256=CS4L-WbI2ZuWLgbGATtF41--h0iGkfuE6dK5DYYiC5g,2787 +sympy/printing/tests/test_preview.py,sha256=dSVxiGqdNR6gbF40V4J2tGhQ-T4RDvSyGypHvYcPDYM,988 +sympy/printing/tests/test_pycode.py,sha256=nFeQHGQ9l-R2X_Q1snMFZP4KQ0M35V48P_j9kdahW4Q,15894 +sympy/printing/tests/test_python.py,sha256=HN7JkzQcKSnB6718i7kaEJZ5pYMqu56z1mSmHQGzY4k,8128 +sympy/printing/tests/test_rcode.py,sha256=PqYfr3akhhBcmswU3QLSFNyrmNTc92irTn0Wf_2jdv4,13779 +sympy/printing/tests/test_repr.py,sha256=sj3bAdBShn0itw2yYsAuDOuRPfKQSKJy2R8cPlLdDnY,12689 +sympy/printing/tests/test_rust.py,sha256=eZTYJ3zN5LEt8tl5KhADg1HwcrofhSQswagP_zcxoMw,11504 +sympy/printing/tests/test_smtlib.py,sha256=b4Ou4bTp8E_fFzlg6vQRpWowhxR-9SB88qA_yShXjhk,20934 +sympy/printing/tests/test_str.py,sha256=m-fw28ThIk0AcCz2_0HKgUNIwe9m3YGndcb4bJ28Leo,42262 +sympy/printing/tests/test_tableform.py,sha256=Ff5l1QL2HxN32WS_TdFhUAVqzop8YoWY3Uz1TThvVIM,5692 +sympy/printing/tests/test_tensorflow.py,sha256=p-Jx4Umby9k5t5umhus-0hkuTJN7C5kEbJL_l2KdyJA,15643 +sympy/printing/tests/test_theanocode.py,sha256=E36Fj72HxMK0e1pKTkoTpv9wI4UvwHdVufo-JA6dYq0,21394 +sympy/printing/tests/test_tree.py,sha256=_8PGAhWMQ_A0f2DQLdDeMrpxY19889P5Ih9H41RZn8s,6080 +sympy/printing/theanocode.py,sha256=3RxlOR4bRjMHOta6kvBk_ZuxKM3LZvPO8WYuxrtd38g,19028 +sympy/printing/tree.py,sha256=GxEF1WIflPNShlOrZc8AZch2I6GxDlbpImHqX61_P5o,3872 +sympy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/release.py,sha256=iyi5eR6SKGqbP1Fp0_6p-darg5riTqiLfREzuz7g8UE,21 +sympy/sandbox/__init__.py,sha256=IaEVOYHaZ97OHEuto1UGthFuO35c0uvAZFZU23YyEaU,189 +sympy/sandbox/__pycache__/__init__.cpython-310.pyc,, +sympy/sandbox/__pycache__/indexed_integrals.cpython-310.pyc,, +sympy/sandbox/indexed_integrals.py,sha256=svh4xDIa8nGpDeH4TeRb49gG8miMvXpCzEarbor58EE,2141 +sympy/sandbox/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/sandbox/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/sandbox/tests/__pycache__/test_indexed_integrals.cpython-310.pyc,, +sympy/sandbox/tests/test_indexed_integrals.py,sha256=UK2E2wg9EMwda4Vwpzyj3rmXs6ni33HqcbyaqAww6ww,1179 +sympy/series/__init__.py,sha256=DYG9oisjzYeS55dIUpQpbAFcoDz7Q81fZJw36PRGu14,766 +sympy/series/__pycache__/__init__.cpython-310.pyc,, +sympy/series/__pycache__/acceleration.cpython-310.pyc,, +sympy/series/__pycache__/approximants.cpython-310.pyc,, +sympy/series/__pycache__/aseries.cpython-310.pyc,, +sympy/series/__pycache__/formal.cpython-310.pyc,, 
+sympy/series/__pycache__/fourier.cpython-310.pyc,, +sympy/series/__pycache__/gruntz.cpython-310.pyc,, +sympy/series/__pycache__/kauers.cpython-310.pyc,, +sympy/series/__pycache__/limits.cpython-310.pyc,, +sympy/series/__pycache__/limitseq.cpython-310.pyc,, +sympy/series/__pycache__/order.cpython-310.pyc,, +sympy/series/__pycache__/residues.cpython-310.pyc,, +sympy/series/__pycache__/sequences.cpython-310.pyc,, +sympy/series/__pycache__/series.cpython-310.pyc,, +sympy/series/__pycache__/series_class.cpython-310.pyc,, +sympy/series/acceleration.py,sha256=9VTCOEOgIyOvcwjY5ZT_c4kWE-f_bL79iz_T3WGis94,3357 +sympy/series/approximants.py,sha256=tE-hHuoW62QJHDA3WhRlXaTkokCAODs1vXgjirhOYiQ,3181 +sympy/series/aseries.py,sha256=cHVGRQaza4ayqI6ji6OHNkdQEMV7Bko4f4vug2buEQY,255 +sympy/series/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/series/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/series/benchmarks/__pycache__/bench_limit.cpython-310.pyc,, +sympy/series/benchmarks/__pycache__/bench_order.cpython-310.pyc,, +sympy/series/benchmarks/bench_limit.py,sha256=2PtdeeJtD6qyEvt9HFNvyTnMM8phFZRjscgnb4fHndU,173 +sympy/series/benchmarks/bench_order.py,sha256=iC8sQJ0lLlTgiXltAyLzSCQ-3490cf-c6NFiIU44JSk,207 +sympy/series/formal.py,sha256=CtRziTUItAd8G9z__jJ9s7dRIHAOdeHajdPmNB3HRgY,51772 +sympy/series/fourier.py,sha256=dzVo4VZ8OkD9YSbBEYQudpcHcEdVMG7LfnIRTMd4Lzg,22885 +sympy/series/gruntz.py,sha256=Iex_MRKqixBX7cehe-Wro-4fNreoXBsFIjcoUvsijG8,24544 +sympy/series/kauers.py,sha256=PzD0MATMNjLjPi9GW5GQGL6Uqc2UT-uPwnzhi7TkJH8,1720 +sympy/series/limits.py,sha256=D_lAe-Y0V1n5W3JztWs34tUasTTFgNqQi4MuPZc5oJk,12820 +sympy/series/limitseq.py,sha256=WM1Lh3RXhSZM1gQaJrhWnUtYEgJunLujIEw1gmtVhYw,7752 +sympy/series/order.py,sha256=bKvLPG0QwPl3a7Qw-SMQEjkpyaTxxye7pvC27-jvt80,19255 +sympy/series/residues.py,sha256=k46s_fFfIHdJZqfst-B_-X1R-SAWs_rR9MQH7a9JLtg,2213 +sympy/series/sequences.py,sha256=S2_GtHiPY9q2BpzbVgJsD4pBf_e4yWveEwluX9rSHF4,35589 +sympy/series/series.py,sha256=crSkQK1wA6FQAKI1islG6rpAzvWlz1gZZPx2Awp43Qg,1861 +sympy/series/series_class.py,sha256=033NJ5Re8AS4eq-chmfct3-Lz2vBqdFqXtnrbxswTx0,2918 +sympy/series/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/series/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_approximants.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_aseries.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_demidovich.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_formal.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_fourier.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_gruntz.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_kauers.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_limits.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_limitseq.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_lseries.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_nseries.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_order.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_residues.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_sequences.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_series.cpython-310.pyc,, +sympy/series/tests/test_approximants.py,sha256=KViHMW1dPXn7xaPYhtTQ9L_WtLLkoIic6yfFnwZ8Q70,1012 +sympy/series/tests/test_aseries.py,sha256=LblW4hBDVhigX9YvNc_HFvMm8nJMSTAT9PcUK3p-9HU,2371 +sympy/series/tests/test_demidovich.py,sha256=JGYacqJMEqHS6oT2AYs9d7iutIEb32PkJs9EJqOHxcQ,4947 
+sympy/series/tests/test_formal.py,sha256=k2rqySJg6WnPSwcDyQBG7041bJxXdiYZt-KSs_IAso0,22495 +sympy/series/tests/test_fourier.py,sha256=Dknk64RWGNO8kXmpy2RRIbT8b-0CjL_35QcBugReW38,5891 +sympy/series/tests/test_gruntz.py,sha256=CRRAlU0JLygDL7pHnxfILSDAQ6UbJfaKZrClAdGB1iE,16060 +sympy/series/tests/test_kauers.py,sha256=Z85FhfXOOVki0HNGeK5BEBZOpkuB6SnKK3FqfK1-aLQ,1102 +sympy/series/tests/test_limits.py,sha256=yMw_5X2GLXybVHHMnQ0H0Nx8sXWPYK9EH8boSZBOYwo,44263 +sympy/series/tests/test_limitseq.py,sha256=QjEF99sYEDqfY7ULz1qjQTo6e0lIRUCflEOBgiDYRVA,5691 +sympy/series/tests/test_lseries.py,sha256=GlQvlBlD9wh02PPBP6zU83wmhurvGUFTuCRp44B4uI4,1875 +sympy/series/tests/test_nseries.py,sha256=uzhzYswSOe9Gh_nWKeO69tvGPMLd-9tqk4HBYX8JIm4,18284 +sympy/series/tests/test_order.py,sha256=BGB1j0vmSMS8lGwSVmBOc9apI1NM82quFwF2Hhr2bDE,16500 +sympy/series/tests/test_residues.py,sha256=pT9xzPqtmfKGSbLLAxgDVZLTSy3TOxyfq3thTJs2VLw,3178 +sympy/series/tests/test_sequences.py,sha256=Oyq32yQZnGNQDS2uJ3by3bZ-y4G9c9BFfdQTcVuW2RM,11161 +sympy/series/tests/test_series.py,sha256=rsSCpDWpZQGMo0RfrkCS5XOl--wVFmIyZcaYUoaFXdc,15478 +sympy/sets/__init__.py,sha256=3vjCm4v2esbpsVPY0ROwTXMETxns_66bG4FCIFZ96oM,1026 +sympy/sets/__pycache__/__init__.cpython-310.pyc,, +sympy/sets/__pycache__/conditionset.cpython-310.pyc,, +sympy/sets/__pycache__/contains.cpython-310.pyc,, +sympy/sets/__pycache__/fancysets.cpython-310.pyc,, +sympy/sets/__pycache__/ordinals.cpython-310.pyc,, +sympy/sets/__pycache__/powerset.cpython-310.pyc,, +sympy/sets/__pycache__/setexpr.cpython-310.pyc,, +sympy/sets/__pycache__/sets.cpython-310.pyc,, +sympy/sets/conditionset.py,sha256=mBxxVHIFt9UfddAyvwfd-uVsM5fisNUSvBdNWH5QN_A,7825 +sympy/sets/contains.py,sha256=1jXxAFsl2ivXlT9SsGOM7s1uvS2UKEuWzNYA_bTtS6U,1234 +sympy/sets/fancysets.py,sha256=kVDkGbp316dFdR5GMWLtreltBFot8G39XM_xLvG1TkU,48118 +sympy/sets/handlers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/sets/handlers/__pycache__/__init__.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/add.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/comparison.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/functions.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/intersection.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/issubset.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/mul.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/power.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/union.cpython-310.pyc,, +sympy/sets/handlers/add.py,sha256=_ucFvxuDv9wsmKxGkCDUERtYk3I_tQxjZjY3ZkroWs0,1863 +sympy/sets/handlers/comparison.py,sha256=WfT_vLrOkvPqRg2mf7gziVs_6cLg0kOTEFv-Nb1zIvo,1601 +sympy/sets/handlers/functions.py,sha256=jYSFqFNH6mXbKFPgvIAIGY8BhbLPo1dAvcNg4MxmCaI,8381 +sympy/sets/handlers/intersection.py,sha256=rIdRTqFQzbsa0NGepzWmfoKhAd87aEqxONdOgujR_0A,16633 +sympy/sets/handlers/issubset.py,sha256=azka_5eOaUro3r3v72PmET0oY8-aaoJkzVEK7kuqXCA,4739 +sympy/sets/handlers/mul.py,sha256=XFbkOw4PDQumaOEUlHeQLvjhIom0f3iniSYv_Kau-xw,1842 +sympy/sets/handlers/power.py,sha256=84N3dIus7r09XV7PF_RiEpFRw1y5tOGD34WKzSM9F-4,3186 +sympy/sets/handlers/union.py,sha256=lrAdydqExnALUjM0dnoM-7JAZqtbgLb46Y2GGmFtQdw,4225 +sympy/sets/ordinals.py,sha256=GSyaBq7BHJC3pvgoCDoUKZQ0IE2VXyHtx6_g5OS64W4,7641 +sympy/sets/powerset.py,sha256=vIGnSYKngEPEt6V-6beDOXAOY9ugDLJ8fXOx5H9JJck,2913 +sympy/sets/setexpr.py,sha256=jMOQigDscLTrFPXvHqo1ODVRG9BqC4yn38Ej4m6WPa0,3019 +sympy/sets/sets.py,sha256=Ma1U85BlQq_VwQZzu5aVVrqK9h0f7iwsltfOleqRnUE,79027 
+sympy/sets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/sets/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_conditionset.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_contains.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_fancysets.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_ordinals.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_powerset.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_setexpr.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_sets.cpython-310.pyc,, +sympy/sets/tests/test_conditionset.py,sha256=4FdbXxobY286r5UtrCbcQPqaFIycsdlbtNO2vJmzsEI,11352 +sympy/sets/tests/test_contains.py,sha256=SYiiiedUpAevS0I2gBQ8JEWrhRBmGsvOAxjGLPRe_gg,1559 +sympy/sets/tests/test_fancysets.py,sha256=GsRbQGZK_KAGp9aIBs6TLWlLzDNJvzkrzjzdUFMhRb8,51685 +sympy/sets/tests/test_ordinals.py,sha256=L4DYc6ByQMDwJGFzJC3YhfSrVk5auW7pf4QYpJ5xY7w,2637 +sympy/sets/tests/test_powerset.py,sha256=nFvDGlhAf0wG-pZnPkgJjfwDHrTwdro3MYIinwyxn94,4805 +sympy/sets/tests/test_setexpr.py,sha256=E--SjYVzrmau0EbD8g4NTqp6aLD8qHzIuI7sAfuWxpY,14797 +sympy/sets/tests/test_sets.py,sha256=9Upkysel9pewUn77Rowv0Ct8jKduZgW2lutpGKBnQj4,66659 +sympy/simplify/__init__.py,sha256=MH1vkwHq0J5tNm7ss8V6v-mjrDGUXwfOsariIwfi38c,1274 +sympy/simplify/__pycache__/__init__.cpython-310.pyc,, +sympy/simplify/__pycache__/combsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/cse_main.cpython-310.pyc,, +sympy/simplify/__pycache__/cse_opts.cpython-310.pyc,, +sympy/simplify/__pycache__/epathtools.cpython-310.pyc,, +sympy/simplify/__pycache__/fu.cpython-310.pyc,, +sympy/simplify/__pycache__/gammasimp.cpython-310.pyc,, +sympy/simplify/__pycache__/hyperexpand.cpython-310.pyc,, +sympy/simplify/__pycache__/hyperexpand_doc.cpython-310.pyc,, +sympy/simplify/__pycache__/powsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/radsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/ratsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/simplify.cpython-310.pyc,, +sympy/simplify/__pycache__/sqrtdenest.cpython-310.pyc,, +sympy/simplify/__pycache__/traversaltools.cpython-310.pyc,, +sympy/simplify/__pycache__/trigsimp.cpython-310.pyc,, +sympy/simplify/combsimp.py,sha256=XZOyP8qxowsXNbrtdUiinUFTUau4DZvivmd--Cw8Jnk,3605 +sympy/simplify/cse_main.py,sha256=4TJ15SSMyLa1rBp3FswVpkSmUDsu3uMxBkaUlyU9xZM,31349 +sympy/simplify/cse_opts.py,sha256=ZTCaOdOrgtifWxQmFzyngrLq9uwzByBdiSS5mE-DDoE,1618 +sympy/simplify/epathtools.py,sha256=YEeS5amYseT1nC4bHqyyemrjAE1qlhWz0ISXJk5I8Xo,10173 +sympy/simplify/fu.py,sha256=fgEyS5xWwvEUDWDkA7nco9k96NDxmjf3AHrP6Yc1zsg,61835 +sympy/simplify/gammasimp.py,sha256=n-TDIl7W_8RPSvpRTk8XiRSvYDBpzh55xxxWBpdXrfI,18609 +sympy/simplify/hyperexpand.py,sha256=TCqQwNyLflSgkGbuhVAohoXcMr1Dc9OgdXzeROC78Go,84437 +sympy/simplify/hyperexpand_doc.py,sha256=E8AD0mj8ULtelDSUkmJKJY7kYm5fVfCL4QH_DX65qEw,521 +sympy/simplify/powsimp.py,sha256=ThrrYTEIwQnd1cOfw-_p6ydRb1e2-7K5CU7dJpXTx-Y,26577 +sympy/simplify/radsimp.py,sha256=rE5fKX7Rf744zH_ybaTdytGNDPmGtEnd8oD9btuM_cU,41028 +sympy/simplify/ratsimp.py,sha256=s8K5jmxvPoYw8DVIpW0-h-brHlWi3a3Xj7DQoKJUjl8,7686 +sympy/simplify/simplify.py,sha256=VNAkKbQc_Mr4wxKTNfhOP4US4FccKMNI07Avj4axcQc,72902 +sympy/simplify/sqrtdenest.py,sha256=Ee1_NGJmWMG2fn2906PpyC79W-dZQdsSLNjkiT4gi1Q,21635 +sympy/simplify/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/simplify/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_combsimp.cpython-310.pyc,, 
+sympy/simplify/tests/__pycache__/test_cse.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_epathtools.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_fu.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_function.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_gammasimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_hyperexpand.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_powsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_radsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_ratsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_rewrite.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_simplify.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_sqrtdenest.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_trigsimp.cpython-310.pyc,, +sympy/simplify/tests/test_combsimp.py,sha256=O95WSxCvo2fDQs-UlarAcSf0_8M3PuTR76lhREDoNA8,2958 +sympy/simplify/tests/test_cse.py,sha256=pXDjx2yrL1YlT0ddzUJnZn3a1zD-Ch6I1C4TPtK9Nlk,25299 +sympy/simplify/tests/test_epathtools.py,sha256=ugsQlfuK6POiixdeit63QovsVAlG5JyCaPlPp0j35LE,3525 +sympy/simplify/tests/test_fu.py,sha256=Xqv8OyB_z3GrDUa9YdxyY98vq_XrwiMKzwMpqKx8XFQ,18651 +sympy/simplify/tests/test_function.py,sha256=gzdcSFObuDzVFJDdAgmERtZJvG38WNSmclPAdG8OaPQ,2199 +sympy/simplify/tests/test_gammasimp.py,sha256=32cPRmtG-_Mz9g02lmmn-PWDD3J_Ku6sxLxIUU7WqxE,5320 +sympy/simplify/tests/test_hyperexpand.py,sha256=tkrRq3zeOjXlH88kGiPgPHC3TTr5Y4BboC3bqDssKJc,40851 +sympy/simplify/tests/test_powsimp.py,sha256=CG5H_xSbtwZakjLzL-EEg-T9j2GOUylCU5YgLsbHm2A,14313 +sympy/simplify/tests/test_radsimp.py,sha256=7GjCVKP_nyS8s36Oxwmw6TiPRY0fG3aZP9Rd3oSksTY,18789 +sympy/simplify/tests/test_ratsimp.py,sha256=uRq7AGI957LeLOmYIXMqKkstQylK09xMYJRUflT8a-s,2210 +sympy/simplify/tests/test_rewrite.py,sha256=LZj4V6a95GJj1o3NlKRoHMk7sWGPASFlw24nsm4z43k,1127 +sympy/simplify/tests/test_simplify.py,sha256=7t9yEQCj53nrir-lItM0BSKZPgueDpul3H-Bsp-Bcu8,41565 +sympy/simplify/tests/test_sqrtdenest.py,sha256=4zRtDQVGpKRRBYSAnEF5pSM0AR_fAMumONu2Ocb3tqg,7470 +sympy/simplify/tests/test_trigsimp.py,sha256=vG5PDTDNOuFypT7H9DSMjIollPqkKdNhWv5FBj6vFnE,19949 +sympy/simplify/traversaltools.py,sha256=pn_t9Yrk_SL1X0vl-zVR6yZaxkY25D4MwTBv4ywnD1Y,409 +sympy/simplify/trigsimp.py,sha256=CasB3mOMniKbNiBDJU-SjyIFxNCKIWkgFLEsbOYlRSA,46856 +sympy/solvers/__init__.py,sha256=cqnpjbmL0YQNal_aQ-AFeCNkU1eHCpC17uaJ-Jo8COQ,2210 +sympy/solvers/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/__pycache__/bivariate.cpython-310.pyc,, +sympy/solvers/__pycache__/decompogen.cpython-310.pyc,, +sympy/solvers/__pycache__/deutils.cpython-310.pyc,, +sympy/solvers/__pycache__/inequalities.cpython-310.pyc,, +sympy/solvers/__pycache__/pde.cpython-310.pyc,, +sympy/solvers/__pycache__/polysys.cpython-310.pyc,, +sympy/solvers/__pycache__/recurr.cpython-310.pyc,, +sympy/solvers/__pycache__/solvers.cpython-310.pyc,, +sympy/solvers/__pycache__/solveset.cpython-310.pyc,, +sympy/solvers/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/benchmarks/__pycache__/bench_solvers.cpython-310.pyc,, +sympy/solvers/benchmarks/bench_solvers.py,sha256=ZVK2TIW0XjWRDBex054ymmVlSBQw-RIBhEL1wS2ZAmU,288 +sympy/solvers/bivariate.py,sha256=yrlo0AoY_MtXHP1j0qKV4UgAhSXBBpvHHRnDJuCFsC8,17869 +sympy/solvers/decompogen.py,sha256=dWQla7hp7A4RqI2a0qRNQLWNPEuur68lD3dVTyktdBU,3757 +sympy/solvers/deutils.py,sha256=6dCIoZqX8mFz77SpT1DOM_I5yvdwU1tUMnTbA2vjYME,10309 
+sympy/solvers/diophantine/__init__.py,sha256=I1p3uj3kFQv20cbsZ34K5rNCx1_pDS7JwHUCFstpBgs,128 +sympy/solvers/diophantine/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc,, +sympy/solvers/diophantine/diophantine.py,sha256=oU1NhMmD2Eyzl_H5mMZw90-rxxU4A4MnwvrDswukk-8,120229 +sympy/solvers/diophantine/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/diophantine/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/diophantine/tests/__pycache__/test_diophantine.cpython-310.pyc,, +sympy/solvers/diophantine/tests/test_diophantine.py,sha256=mB79JLU5qe-9EM33USi8LmNLJjKrNuZ8TpPxaBz7gVw,42265 +sympy/solvers/inequalities.py,sha256=2IZlzDBYx8lWmW_7PVnIpTw6_FuYFsJLKvYna3nurA4,33098 +sympy/solvers/ode/__init__.py,sha256=I7RKwCcaoerflUm5i3ZDJgBIOnkhBjb83BCHcVcFqfM,468 +sympy/solvers/ode/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/hypergeometric.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/lie_group.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/nonhomogeneous.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/ode.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/riccati.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/single.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/subscheck.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/systems.cpython-310.pyc,, +sympy/solvers/ode/hypergeometric.py,sha256=kizvLgjzX1VUZ1n84uT6tlOs_8NfQBW1JZVo0fJLkdM,10048 +sympy/solvers/ode/lie_group.py,sha256=tGCy_KAMuKa4gb4JR084Qy0VKu9qU1BoYBgreDX5D9Q,39242 +sympy/solvers/ode/nonhomogeneous.py,sha256=SyQVXK3BB1gEZlcK1q5LueWvpyo-U600tdnpV_87QbE,18231 +sympy/solvers/ode/ode.py,sha256=Zt6XrqtQTEPa5a7lj-r0HJ8tZoS-lJNgt8J_3kHrqyg,145088 +sympy/solvers/ode/riccati.py,sha256=Ma2sEij9Ns3onj35F7PMOLAXsFG4NAcPjP-Qp5Spt4s,30748 +sympy/solvers/ode/single.py,sha256=UtDMHdaKSYKCOfanLiwG3tAzqov5eG51fV_5dGq_agI,109468 +sympy/solvers/ode/subscheck.py,sha256=CIPca_qTxL9z5oaD2e2NrgME0eVQgF9PabZndcVqHZM,16130 +sympy/solvers/ode/systems.py,sha256=jjhV_7GdP-kpqM8Kk3xlR1Dss5rvWCC839wguTnFLhI,71526 +sympy/solvers/ode/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/ode/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_lie_group.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_ode.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_riccati.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_single.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_subscheck.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc,, +sympy/solvers/ode/tests/test_lie_group.py,sha256=vg1yy_-a5x1Xm2IcVkEi5cD2uA5wE5gjqpfBwkV1vZc,5319 +sympy/solvers/ode/tests/test_ode.py,sha256=WsDeiS1cxO4NCDNJa99NMAqysPsOrKTQ0c6aY_u2vjc,48311 +sympy/solvers/ode/tests/test_riccati.py,sha256=-2C79UTh6WGwT8GjQ_YwdzlBrQU45f-NT7y0s1vdo8c,29352 +sympy/solvers/ode/tests/test_single.py,sha256=RV6Dl3MjY1dOQwNZk7hveZUzz8Gft6plRuIr7FmG58c,99983 +sympy/solvers/ode/tests/test_subscheck.py,sha256=Gzwc9h9n6zlNOhJ8Qh6fQDeB8ghaRmgv3ktBAfPJx-U,12468 +sympy/solvers/ode/tests/test_systems.py,sha256=Lkq84sR3pSw75d_pTAkm2_0gY45pCTKWmKmrO2zbov8,129359 +sympy/solvers/pde.py,sha256=FRFnEbD7ZJOcy8-q1LZ5NvYRt4Fu4Avf5Xe6Xk6pWoo,35659 +sympy/solvers/polysys.py,sha256=SQw-W8d5VHBfF81EYVFbcSSVUrsIHG9a9YzbkUaKIqc,13202 +sympy/solvers/recurr.py,sha256=DyssZuOyemoC6J1cWq635O7zkg1WLHrR7KGoM-gNy0g,25389 
+sympy/solvers/solvers.py,sha256=bVtrpSn5jmko1ik6_JXD2rYW5ZRNKnboT0OiBDRbFRw,136170 +sympy/solvers/solveset.py,sha256=KySAjWzQfiEnVpXRHSCGh8Gq2ObJWOZf7OMmssZR5qU,141021 +sympy/solvers/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_constantsimp.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_decompogen.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_inequalities.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_numeric.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_pde.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_polysys.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_recurr.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc,, +sympy/solvers/tests/test_constantsimp.py,sha256=9Feugsg9jD2BwQiG4EFpb9fORyst6JdBmZqq2GaOgH8,8707 +sympy/solvers/tests/test_decompogen.py,sha256=7GUsDQQZtYbZIK0p0UxsOuNEJxEt4IHeOSsem_k-k0U,2943 +sympy/solvers/tests/test_inequalities.py,sha256=MuSP5v1kFL7eH_CSqOPhl6xDd1GuwRBWcZQSCwBy6Bg,20688 +sympy/solvers/tests/test_numeric.py,sha256=EeqGECpAsHoaXulCsOEJ6zAFn5i8iDy52Uo67awFAII,4738 +sympy/solvers/tests/test_pde.py,sha256=UGP3uWjF8pKQgfPifmdfvS5URVmzSg6m2NkS7LGzmio,9257 +sympy/solvers/tests/test_polysys.py,sha256=P1Jk79CAYB85L-O3KRJKpsqvwVJgqqJ_u44NigGWsaA,6873 +sympy/solvers/tests/test_recurr.py,sha256=-OeghSg16GFN70y_RUXC6CF6VU_b7NXaKDbejtRSocg,11418 +sympy/solvers/tests/test_solvers.py,sha256=hbJtihDVJQfRngUOBSz4OtV8HIkojkg528UNGtVAmr8,104484 +sympy/solvers/tests/test_solveset.py,sha256=YXl1lfZ1xnYrk_Dt4DY1gZuY9a0A5V462TPgqNfIPXk,134515 +sympy/stats/__init__.py,sha256=aNs_difmTw7e2GIfLGaPLpS-mXlttrrB3TVFPDSdGwU,8471 +sympy/stats/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/__pycache__/compound_rv.cpython-310.pyc,, +sympy/stats/__pycache__/crv.cpython-310.pyc,, +sympy/stats/__pycache__/crv_types.cpython-310.pyc,, +sympy/stats/__pycache__/drv.cpython-310.pyc,, +sympy/stats/__pycache__/drv_types.cpython-310.pyc,, +sympy/stats/__pycache__/error_prop.cpython-310.pyc,, +sympy/stats/__pycache__/frv.cpython-310.pyc,, +sympy/stats/__pycache__/frv_types.cpython-310.pyc,, +sympy/stats/__pycache__/joint_rv.cpython-310.pyc,, +sympy/stats/__pycache__/joint_rv_types.cpython-310.pyc,, +sympy/stats/__pycache__/matrix_distributions.cpython-310.pyc,, +sympy/stats/__pycache__/random_matrix.cpython-310.pyc,, +sympy/stats/__pycache__/random_matrix_models.cpython-310.pyc,, +sympy/stats/__pycache__/rv.cpython-310.pyc,, +sympy/stats/__pycache__/rv_interface.cpython-310.pyc,, +sympy/stats/__pycache__/stochastic_process.cpython-310.pyc,, +sympy/stats/__pycache__/stochastic_process_types.cpython-310.pyc,, +sympy/stats/__pycache__/symbolic_multivariate_probability.cpython-310.pyc,, +sympy/stats/__pycache__/symbolic_probability.cpython-310.pyc,, +sympy/stats/compound_rv.py,sha256=SO1KXJ0aHGbD5y9QA8o6qOHbio3ua8wyO2Rsh0Hnw48,7965 +sympy/stats/crv.py,sha256=VK7jvYiQH523ar6QvLzV_k67u0ghcCrrWlBgt3cMdaw,20979 +sympy/stats/crv_types.py,sha256=TDANQNWz_fcSq7RzyMzxEKeidlHEmzdhunmxnuGlZNk,120259 +sympy/stats/drv.py,sha256=ewxYnUlCyvaF5ceMpziiz4e6FAgknzP5cC1ZVvQ_YLE,11995 +sympy/stats/drv_types.py,sha256=q7MjAtpLjO2nFxnQOKfw_Ipf2-gYzlavbqrEcUjMQlw,19288 +sympy/stats/error_prop.py,sha256=a-H6GZEidsiP_4-iNw7nSD99AMyN6DNHsSl0IUZGIAs,3315 +sympy/stats/frv.py,sha256=C4FHAVuckxdVnXGlmT957At5xdOLVYvH76KgL44TR38,16876 
+sympy/stats/frv_types.py,sha256=MP1byJwusjZKRmzsy0fMBRkzScurG2-q58puaF6TF0U,23224 +sympy/stats/joint_rv.py,sha256=DcixlO2Ml4gnwMmZk2VTegiHVq88DkLdQlOTQ57SQtc,15963 +sympy/stats/joint_rv_types.py,sha256=Yx_TL9Xx862SZo8MofErvVh-fptL9UTzalDUbnW26Lg,30633 +sympy/stats/matrix_distributions.py,sha256=3OricwEMM_NU8b2lJxoiSTml7kvqrNQ6IUIn9Xy_DsY,21953 +sympy/stats/random_matrix.py,sha256=NmzLC5JMDWI2TvH8tY6go8lYyHmqcZ-B7sSIO7z7oAk,1028 +sympy/stats/random_matrix_models.py,sha256=7i5XAUYxt-ekmP5KDMaytUlmCvxglEspoWbswSf82tE,15328 +sympy/stats/rv.py,sha256=r8G52PBmkfrVJtHUWEw1dPiBSrwTYagRdyzAweftjqk,54464 +sympy/stats/rv_interface.py,sha256=8KeUP2YG_1g4OYPrwSdZyq4R0mOO52qqBX-D225WbUg,13939 +sympy/stats/sampling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/stats/sampling/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/sampling/__pycache__/sample_numpy.cpython-310.pyc,, +sympy/stats/sampling/__pycache__/sample_pymc.cpython-310.pyc,, +sympy/stats/sampling/__pycache__/sample_scipy.cpython-310.pyc,, +sympy/stats/sampling/sample_numpy.py,sha256=B4ZC7ZBrSD6ICQT468rOy-xrOgQDuecsHa0zJesAeYE,4229 +sympy/stats/sampling/sample_pymc.py,sha256=9g-n04aXSFc6F7FJ5zTYtHHL6W8-26g1nrgtamJc3Hw,2995 +sympy/stats/sampling/sample_scipy.py,sha256=ysqpDy8bp1RMH0g5FFgMmp2SQuXGFkcSH7JDZEpiZ8w,6329 +sympy/stats/sampling/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/stats/sampling/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/sampling/tests/__pycache__/test_sample_continuous_rv.cpython-310.pyc,, +sympy/stats/sampling/tests/__pycache__/test_sample_discrete_rv.cpython-310.pyc,, +sympy/stats/sampling/tests/__pycache__/test_sample_finite_rv.cpython-310.pyc,, +sympy/stats/sampling/tests/test_sample_continuous_rv.py,sha256=Gh8hFN1hFFsthEv9wP2ZdgghQfaEnE8n7HlmyXXhN1E,5708 +sympy/stats/sampling/tests/test_sample_discrete_rv.py,sha256=jd2qnr4ABqpFcJrGcUpnTsN1z1d1prVvwUkG965oFeA,3319 +sympy/stats/sampling/tests/test_sample_finite_rv.py,sha256=dWwrFePw8eX2rBheAXi1AVxr_gqBD63VZKfW81hNoQc,3061 +sympy/stats/stochastic_process.py,sha256=pDz0rbKXTiaNmMmmz70dP3F_KWL_XhoCKFHYBNt1QeU,2312 +sympy/stats/stochastic_process_types.py,sha256=S2y3qCs7AO1EkQltN_OYkB4PsamQqcIjcPu_181wFqY,88608 +sympy/stats/symbolic_multivariate_probability.py,sha256=4wwyTYywD3TQ43Isv5KDtg-7jCyF-SW5xR5JeeqEfFM,10446 +sympy/stats/symbolic_probability.py,sha256=m0-p5hTGU2Ey7uBQrB7LSPgTvS0C8Fr-SA9d2BAX6Mk,23019 +sympy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/stats/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_compound_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_continuous_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_discrete_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_error_prop.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_finite_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_joint_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_matrix_distributions.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_mix.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_random_matrix.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_stochastic_process.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_symbolic_multivariate.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_symbolic_probability.cpython-310.pyc,, +sympy/stats/tests/test_compound_rv.py,sha256=2927chbHTThA34Ki-ji319QT7ajQ1ueC640Mga-18ZA,6263 
+sympy/stats/tests/test_continuous_rv.py,sha256=j3SFC2-4a6X2JObL3JU8znQkRXOGxz2a9XPlGPoBku0,55665 +sympy/stats/tests/test_discrete_rv.py,sha256=kr3MjfI02cPvQrQISwmsIDEEh2gpMnzZsjMd5TOhAl0,10676 +sympy/stats/tests/test_error_prop.py,sha256=xKAkw3F5XJ72xiDREI7PkyReWNVW_89CD_mjOY_diDY,1933 +sympy/stats/tests/test_finite_rv.py,sha256=JHYgY4snFF5t9qcnQfKaN5zaGsO7_SuNR7Tq234W4No,20413 +sympy/stats/tests/test_joint_rv.py,sha256=W28rCRYczv5Jax7k-bj7OveT-y-AP4q-kRR0-LNaWX0,18653 +sympy/stats/tests/test_matrix_distributions.py,sha256=9daJUiSGaLq34TeZfB-xPqC8xz6vECGrm0DdBZaQPyY,8857 +sympy/stats/tests/test_mix.py,sha256=Cplnw06Ki96Y_4fx6Bu7lUXjxoIfX7tNJasm9SOz5wQ,3991 +sympy/stats/tests/test_random_matrix.py,sha256=CiD1hV25MGHwTfHGaoaehGD3iJ4lqNYi-ZiwReO6CVk,5842 +sympy/stats/tests/test_rv.py,sha256=Bp7UwffIMO7oc8UnFV11yYGcXUjSa0NhsuOgQaNRMt8,12959 +sympy/stats/tests/test_stochastic_process.py,sha256=ufbFxlJ6El6YH7JDztMlrOjXKzrOvEyLGK30j1_lNjw,39335 +sympy/stats/tests/test_symbolic_multivariate.py,sha256=0qXWQUjBU6N5yiNO09B3QB8RfAiLBSCJ0R5n0Eo2-lQ,5576 +sympy/stats/tests/test_symbolic_probability.py,sha256=k5trScMiwSgl9dzJt30BV-t0KuYcyD-s9HtT2-hVhQ0,9398 +sympy/strategies/__init__.py,sha256=XaTAPqDoi6527juvR8LLN1mv6ZcslDrGloTTBMjJzxA,1402 +sympy/strategies/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/__pycache__/core.cpython-310.pyc,, +sympy/strategies/__pycache__/rl.cpython-310.pyc,, +sympy/strategies/__pycache__/tools.cpython-310.pyc,, +sympy/strategies/__pycache__/traverse.cpython-310.pyc,, +sympy/strategies/__pycache__/tree.cpython-310.pyc,, +sympy/strategies/__pycache__/util.cpython-310.pyc,, +sympy/strategies/branch/__init__.py,sha256=xxbMwR2LzLcQWsH9ss8ddE99VHFJTY-cYiR6xhO3tj0,356 +sympy/strategies/branch/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/branch/__pycache__/core.cpython-310.pyc,, +sympy/strategies/branch/__pycache__/tools.cpython-310.pyc,, +sympy/strategies/branch/__pycache__/traverse.cpython-310.pyc,, +sympy/strategies/branch/core.py,sha256=QiXSa7uhvmUBTLyUwBQHrYkWlOceKh5p4kVD90VnCKM,2759 +sympy/strategies/branch/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/strategies/branch/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/branch/tests/__pycache__/test_core.cpython-310.pyc,, +sympy/strategies/branch/tests/__pycache__/test_tools.cpython-310.pyc,, +sympy/strategies/branch/tests/__pycache__/test_traverse.cpython-310.pyc,, +sympy/strategies/branch/tests/test_core.py,sha256=23KQWJxC_2T1arwMAkt9pY1ZtG59avlxTZcVTn81UPI,2246 +sympy/strategies/branch/tests/test_tools.py,sha256=4BDkqVqrTlsivQ0PldQr6PjVZsAikc39tSxGAQA3ir8,942 +sympy/strategies/branch/tests/test_traverse.py,sha256=6rikMnZdamSzww1sSiM-aQwqa4lQrpM-DpOU9XCbiOQ,1322 +sympy/strategies/branch/tools.py,sha256=tvv3IjmQGNYbo-slCbbDf_rylZd537wvLcpdBtT-bbY,357 +sympy/strategies/branch/traverse.py,sha256=7iBViQdNpKu-AHoFED7_C9KBSyYcQBfLGopEJQbNtvk,799 +sympy/strategies/core.py,sha256=nsH6LZgyc_aslv4Na5XvJMEizC6uSzscRlVW91k1pu4,3956 +sympy/strategies/rl.py,sha256=I2puD2khbCmO3e9_ngUnclLgk1c-xBHeUf-bZu5haLM,4403 +sympy/strategies/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/strategies/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_core.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_rl.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_tools.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_traverse.cpython-310.pyc,, 
+sympy/strategies/tests/__pycache__/test_tree.cpython-310.pyc,, +sympy/strategies/tests/test_core.py,sha256=42XHlv1hN1S1QPEf2r9pddZ2EQL6o4FEPQvfo-UmXcw,2152 +sympy/strategies/tests/test_rl.py,sha256=wm0L6pdvddBgRcwhpiSk-nCgyzVGickfnOCkmHWS0j4,1949 +sympy/strategies/tests/test_tools.py,sha256=UdMojFIn3f1b2x2iRGv1Wfnwdso-Kl57GTyjCU_DjzQ,875 +sympy/strategies/tests/test_traverse.py,sha256=jWuZhYEt-F18_rxEMhn6OgGQ1GNs-dM_GFZ2F5nHs2I,2082 +sympy/strategies/tests/test_tree.py,sha256=9NL948rt6i9tYU6CQz9VNxE6l1begQs-MxP2euzE3Sc,2400 +sympy/strategies/tools.py,sha256=ERASzEP2SP-EcJ8p-4XyREYB15q3t81x1cyamJ-M880,1368 +sympy/strategies/traverse.py,sha256=DhPnBJ5Rw_xzhGiBtSciTyV-H2zhlxgjYVjrNH-gLyk,1183 +sympy/strategies/tree.py,sha256=ggnP9l3NIpJsssBMVKr4-yM_m8uCkrkm191ZC6MfZjc,3770 +sympy/strategies/util.py,sha256=2fbR813IY4IYco5mBoGJLu5z88OhXmwuIxgOO9IvZO4,361 +sympy/tensor/__init__.py,sha256=VMNXCRSayigQT6a3cvf5M_M-wdV-KSil_JbAmHcuUQc,870 +sympy/tensor/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/__pycache__/functions.cpython-310.pyc,, +sympy/tensor/__pycache__/index_methods.cpython-310.pyc,, +sympy/tensor/__pycache__/indexed.cpython-310.pyc,, +sympy/tensor/__pycache__/tensor.cpython-310.pyc,, +sympy/tensor/__pycache__/toperators.cpython-310.pyc,, +sympy/tensor/array/__init__.py,sha256=lTT1EwV5tb3WAvmmS_mIjhCSWSLiB0NNPW4n9_3fu0k,8244 +sympy/tensor/array/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/array/__pycache__/array_comprehension.cpython-310.pyc,, +sympy/tensor/array/__pycache__/array_derivatives.cpython-310.pyc,, +sympy/tensor/array/__pycache__/arrayop.cpython-310.pyc,, +sympy/tensor/array/__pycache__/dense_ndim_array.cpython-310.pyc,, +sympy/tensor/array/__pycache__/mutable_ndim_array.cpython-310.pyc,, +sympy/tensor/array/__pycache__/ndim_array.cpython-310.pyc,, +sympy/tensor/array/__pycache__/sparse_ndim_array.cpython-310.pyc,, +sympy/tensor/array/array_comprehension.py,sha256=01PTIbkAGaq0CDcaI_2KsaMnYm1nxQ8sFAiHHcc__gw,12262 +sympy/tensor/array/array_derivatives.py,sha256=BWQC43h2WieqJgaCqhLV39BXN22Gb6zcy_BXerdVixA,4811 +sympy/tensor/array/arrayop.py,sha256=UYKdKQZgDsXtDopymWS8QM7FZcxR1O0D_cbt-Kjx7yM,18395 +sympy/tensor/array/dense_ndim_array.py,sha256=Ie8qVMJyp2Tsq7aVhmZpPX8X-KTlF9uaxkQfTzCZ9z8,6433 +sympy/tensor/array/expressions/__init__.py,sha256=OUMJjZY7HtWJL0ygqkdWC8LdCqibJZhHCfYeXu-eB4E,7045 +sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_indexed_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc,, +sympy/tensor/array/expressions/array_expressions.py,sha256=Gc0ADM3i-6sFoQTsgRHs7dRpmdH0XYVj8z9iS80vEoQ,77022 +sympy/tensor/array/expressions/arrayexpr_derivatives.py,sha256=W9-bY2LL83lLSNHXItzqjOgvf-HIDbUXPoVw8uOymcg,6249 
+sympy/tensor/array/expressions/conv_array_to_indexed.py,sha256=BIwlQr7RKC8bZN3mR8ICC5TYOC9uasYcV0Zc1VNKmiE,445 +sympy/tensor/array/expressions/conv_array_to_matrix.py,sha256=85YZBTZI4o9dJtKDJXXug_lJVLG8dT_22AT7l7DKoyE,416 +sympy/tensor/array/expressions/conv_indexed_to_array.py,sha256=EyW52TplBxIx25mUDvI_5Tzc8LD6Mnp6XNW9wIw9pH4,254 +sympy/tensor/array/expressions/conv_matrix_to_array.py,sha256=XYyqt0NsQSrgNpEkr8xTGeUhR7ZYeNljVFfVEF1K7vA,250 +sympy/tensor/array/expressions/from_array_to_indexed.py,sha256=3YIcsAzWVWQRJYQS90uPvSl2dM7ZqLV_qt7E9-uYU28,3936 +sympy/tensor/array/expressions/from_array_to_matrix.py,sha256=OHkMM_yOLP6C1aAIZB-lPbz4AYS9i2shhFXGFBi9_Lc,41355 +sympy/tensor/array/expressions/from_indexed_to_array.py,sha256=RUcKemmrwuK5RFRr19YSPVMCOkZfLAWlbbB56u8Wi0g,11187 +sympy/tensor/array/expressions/from_matrix_to_array.py,sha256=yIY1RupF9-FVV3jZLsqWxZ1ckoE1-HkQyM8cQIm4_Gs,3929 +sympy/tensor/array/expressions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc,, +sympy/tensor/array/expressions/tests/test_array_expressions.py,sha256=QUAdxQ9TvBpDEAZoJpLSWwbqjmuflPe3xBRP30lFZr0,31262 +sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py,sha256=lpC4ly6MJLDRBcVt3GcP3H6ke9bI-o3VULw0xyF5QbY,2470 +sympy/tensor/array/expressions/tests/test_as_explicit.py,sha256=nOjFKXCqYNu2O7Szc1TD1x1bsUchPRAG3nGlNGEd1Yg,2568 +sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py,sha256=6yNxGXH6BX5607FTjMkwR2t9wNVlEhV8JMSh4UIWux8,2500 +sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py,sha256=2vkSep9CPKYrQQS0u8Ayn_sc7yek1zwzjjCWK5cfYe8,29311 +sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py,sha256=RVEG_qUsXiBH9gHtWp2-9pMC4J2aLc4iUdzBFM0QyTw,8615 +sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py,sha256=G2g5E0l-FABwYyQowbKKvLcEI8NViJXaYLW3eUEcvjw,4595 +sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py,sha256=DG8IoUtxCy2acWjUHUUKu4bRsTxXbeFLFjKMLA2GdLY,1216 +sympy/tensor/array/expressions/utils.py,sha256=Rn58boHHUEoBZFtinDpruLWFBkNBwgkVQ4c9m7Nym1o,3939 +sympy/tensor/array/mutable_ndim_array.py,sha256=M0PTt8IOIcVXqQPWe2N50sm4Eq2bodRXV4Vkd08crXk,277 +sympy/tensor/array/ndim_array.py,sha256=_UYVi2vd1zI0asXN7B53e0mp2plgVT5xvB71A_L63Ao,19060 +sympy/tensor/array/sparse_ndim_array.py,sha256=4nD_Hg-JdC_1mYQTohmKFfL5M1Ugdq0fpnDUILkTtq8,6387 +sympy/tensor/array/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/tensor/array/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/array/tests/__pycache__/test_array_comprehension.cpython-310.pyc,, +sympy/tensor/array/tests/__pycache__/test_array_derivatives.cpython-310.pyc,, 
+sympy/tensor/array/tests/__pycache__/test_arrayop.cpython-310.pyc,, +sympy/tensor/array/tests/__pycache__/test_immutable_ndim_array.cpython-310.pyc,, +sympy/tensor/array/tests/__pycache__/test_mutable_ndim_array.cpython-310.pyc,, +sympy/tensor/array/tests/__pycache__/test_ndim_array.cpython-310.pyc,, +sympy/tensor/array/tests/__pycache__/test_ndim_array_conversions.cpython-310.pyc,, +sympy/tensor/array/tests/test_array_comprehension.py,sha256=32n8ZKV4_5DeJ0F7fM_Xo0i0mx6m9w3uWUI2a6OXhzY,4750 +sympy/tensor/array/tests/test_array_derivatives.py,sha256=3O2nD4_d1TFP75qcGJ8XD4DwfPblFzKhY6fAgNQ9KJ0,1609 +sympy/tensor/array/tests/test_arrayop.py,sha256=WahGcUnArsAo9eaMqGT7_AjKons0WgFzLOWTtNvnSEI,25844 +sympy/tensor/array/tests/test_immutable_ndim_array.py,sha256=9ji_14szn-qoL6DQ5muzIFNaXefT7n55PFigXoFwk50,15823 +sympy/tensor/array/tests/test_mutable_ndim_array.py,sha256=rFFa0o0AJYgPNnpqijl91Vb9EW2kgHGQc6cu9f1fIvY,13070 +sympy/tensor/array/tests/test_ndim_array.py,sha256=KH-9LAME3ldVIu5n7Vd_Xr36dN4frCdiF9qZdBWETu0,2232 +sympy/tensor/array/tests/test_ndim_array_conversions.py,sha256=CUGDCbCcslACy3Ngq-zoig9JnO4yHTw3IPcKy0FnRpw,648 +sympy/tensor/functions.py,sha256=3jkzxjMvHHsWchz-0wvuOSFvkNqnoG5knknPCEsZ1bk,4166 +sympy/tensor/index_methods.py,sha256=dcX9kNKLHi_XXkFHBPS-fcM-PaeYKkX80jmzxC0siiQ,15434 +sympy/tensor/indexed.py,sha256=dLic-2CMpPXItLsJCjIUrRDEio-mH2Dcu3H0NgRo3Do,24660 +sympy/tensor/tensor.py,sha256=MEUQJM7NA40rzlZTV1D5PBR_SdIf7K3bVT2ixzqkYKw,165096 +sympy/tensor/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/tensor/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_functions.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_index_methods.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_indexed.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_printing.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_tensor.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_tensor_element.cpython-310.pyc,, +sympy/tensor/tests/__pycache__/test_tensor_operators.cpython-310.pyc,, +sympy/tensor/tests/test_functions.py,sha256=rBBHjJIUA2oR83UgEJ_GIASDWfTZXDzOllmcO90XYDU,1552 +sympy/tensor/tests/test_index_methods.py,sha256=Pu951z4yYYMOXBKcNteH63hTAxmNX8702nSQH_pciFE,7112 +sympy/tensor/tests/test_indexed.py,sha256=pCvqmScU0oQxx44qm9T3MkKIXKgVFRDkSHLDhSNqOIY,16157 +sympy/tensor/tests/test_printing.py,sha256=sUx_rChNTWFKPNwVl296QXO-d4-yemDJnkEHFislsmc,424 +sympy/tensor/tests/test_tensor.py,sha256=JybH2AAbEGNob44I6vl7uiiy_VpmR4O4gKCZOfwDPWE,75044 +sympy/tensor/tests/test_tensor_element.py,sha256=1dF96FtqUGaJzethw23vJIj3H5KdxsU1Xyd4DU54EB4,908 +sympy/tensor/tests/test_tensor_operators.py,sha256=sOwu-U28098Lg0iV_9RfYxvJ8wAd5Rk6_vAivWdkc9Q,17945 +sympy/tensor/toperators.py,sha256=fniTUpdYz0OvtNnFgrHINedX86FxVcxfKj9l_l1p9Rw,8840 +sympy/testing/__init__.py,sha256=YhdM87Kfsci8340HmKrXVmA4y0z_VeUN5QQbwAOvEbg,139 +sympy/testing/__pycache__/__init__.cpython-310.pyc,, +sympy/testing/__pycache__/matrices.cpython-310.pyc,, +sympy/testing/__pycache__/pytest.cpython-310.pyc,, +sympy/testing/__pycache__/quality_unicode.cpython-310.pyc,, +sympy/testing/__pycache__/randtest.cpython-310.pyc,, +sympy/testing/__pycache__/runtests.cpython-310.pyc,, +sympy/testing/__pycache__/tmpfiles.cpython-310.pyc,, +sympy/testing/matrices.py,sha256=VWBPdjIUYNHE7fdbYcmQwQTYcIWpOP9tFn9A0rGCBmE,216 +sympy/testing/pytest.py,sha256=VsbyFXAwDHWc69AxJZBml7U_Mun6kS5NutziSH6l-RE,13142 +sympy/testing/quality_unicode.py,sha256=aJma-KtrKgusUL1jz5IADz7q6vc70rsfbT9NtxJDeV4,3318 
+sympy/testing/randtest.py,sha256=IKDFAm8b72Z1OkT7vpgnZjaW5LsSU_wf6g35sCkq9I0,562 +sympy/testing/runtests.py,sha256=QbirfrvKseYmrM2kLjHHhNGNgO6DsHJS1ncuH5PnPT4,88921 +sympy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/testing/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/testing/tests/__pycache__/diagnose_imports.cpython-310.pyc,, +sympy/testing/tests/__pycache__/test_code_quality.cpython-310.pyc,, +sympy/testing/tests/__pycache__/test_deprecated.cpython-310.pyc,, +sympy/testing/tests/__pycache__/test_module_imports.cpython-310.pyc,, +sympy/testing/tests/__pycache__/test_pytest.cpython-310.pyc,, +sympy/testing/tests/diagnose_imports.py,sha256=ZtSLMYNT1-RUvPlCUpYzj97aE3NafvGgp0UzRXOPd0Q,9694 +sympy/testing/tests/test_code_quality.py,sha256=JTVznHG1HKBmy3Or4_gFjBlAi0L1BJ2wjgZLUu5zBa0,19237 +sympy/testing/tests/test_deprecated.py,sha256=wQZHs4wDNuK4flaKKLsJW6XRMtrVjMv_5rUP3WspgPA,183 +sympy/testing/tests/test_module_imports.py,sha256=5w6F6JW6K7lgpbB4X9Tj0Vw8AcNVlfaSuvbwKXJKD6c,1459 +sympy/testing/tests/test_pytest.py,sha256=iKO10Tvua1Xem6a22IWH4SDrpFfr-bM-rXx039Ua7YA,6778 +sympy/testing/tmpfiles.py,sha256=bF8ktKC9lDhS65gahB9hOewsZ378UkhLgq3QHiqWYXU,1042 +sympy/this.py,sha256=XfOkN5EIM2RuDxSm_q6k_R_WtkIoSy6PXWKp3aAXvoc,550 +sympy/unify/__init__.py,sha256=Upa9h7SSr9W1PXo0WkNESsGsMZ85rcWkeruBtkAi3Fg,293 +sympy/unify/__pycache__/__init__.cpython-310.pyc,, +sympy/unify/__pycache__/core.cpython-310.pyc,, +sympy/unify/__pycache__/rewrite.cpython-310.pyc,, +sympy/unify/__pycache__/usympy.cpython-310.pyc,, +sympy/unify/core.py,sha256=-BCNPPMdfZuhhIWqyn9pYJoO8yFPGDX78Hn2551ABuE,7037 +sympy/unify/rewrite.py,sha256=Emr8Uoum3gxKpMDqFHJIjx3xChArUIN6XIy6NPfCS8I,1798 +sympy/unify/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/unify/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/unify/tests/__pycache__/test_rewrite.cpython-310.pyc,, +sympy/unify/tests/__pycache__/test_sympy.cpython-310.pyc,, +sympy/unify/tests/__pycache__/test_unify.cpython-310.pyc,, +sympy/unify/tests/test_rewrite.py,sha256=BgA8zmdz9Nw-Xbu4-w3UABeWypqLvmy9VzL744EmYtE,2002 +sympy/unify/tests/test_sympy.py,sha256=UCItZJNAx9dG5F7O27pyXUF1-e6aOwkZ-cVdB6SZFZc,5922 +sympy/unify/tests/test_unify.py,sha256=4TlgchV6NWuBekJx9RGlMjx3-UwonzgIYXDytb7sBRU,3029 +sympy/unify/usympy.py,sha256=6Kxx96FXSdqXimLseVK_FkYwy2vqWhNnxMVPMRShvy4,3964 +sympy/utilities/__init__.py,sha256=nbQhzII8dw5zd4hQJ2SUyriK5dOrqf-bbjy10XKQXPw,840 +sympy/utilities/__pycache__/__init__.cpython-310.pyc,, +sympy/utilities/__pycache__/autowrap.cpython-310.pyc,, +sympy/utilities/__pycache__/codegen.cpython-310.pyc,, +sympy/utilities/__pycache__/decorator.cpython-310.pyc,, +sympy/utilities/__pycache__/enumerative.cpython-310.pyc,, +sympy/utilities/__pycache__/exceptions.cpython-310.pyc,, +sympy/utilities/__pycache__/iterables.cpython-310.pyc,, +sympy/utilities/__pycache__/lambdify.cpython-310.pyc,, +sympy/utilities/__pycache__/magic.cpython-310.pyc,, +sympy/utilities/__pycache__/matchpy_connector.cpython-310.pyc,, +sympy/utilities/__pycache__/memoization.cpython-310.pyc,, +sympy/utilities/__pycache__/misc.cpython-310.pyc,, +sympy/utilities/__pycache__/pkgdata.cpython-310.pyc,, +sympy/utilities/__pycache__/pytest.cpython-310.pyc,, +sympy/utilities/__pycache__/randtest.cpython-310.pyc,, +sympy/utilities/__pycache__/runtests.cpython-310.pyc,, +sympy/utilities/__pycache__/source.cpython-310.pyc,, +sympy/utilities/__pycache__/timeutils.cpython-310.pyc,, +sympy/utilities/__pycache__/tmpfiles.cpython-310.pyc,, 
+sympy/utilities/_compilation/__init__.py,sha256=uYUDPbwrMTbGEMVuago32EN_ix8fsi5M0SvcLOtwMOk,751 +sympy/utilities/_compilation/__pycache__/__init__.cpython-310.pyc,, +sympy/utilities/_compilation/__pycache__/availability.cpython-310.pyc,, +sympy/utilities/_compilation/__pycache__/compilation.cpython-310.pyc,, +sympy/utilities/_compilation/__pycache__/runners.cpython-310.pyc,, +sympy/utilities/_compilation/__pycache__/util.cpython-310.pyc,, +sympy/utilities/_compilation/availability.py,sha256=ybxp3mboH5772JHTWKBN1D-cs6QxATQiaL4zJVV4RE0,2884 +sympy/utilities/_compilation/compilation.py,sha256=t6UrVUHDrk7im_mYXx8s7ZkyUEkllhx38u7AAk5Z1P8,21675 +sympy/utilities/_compilation/runners.py,sha256=mb8_rvyx68qekMx8yZZyBH5G7bX94QG6W3lJ17rBmGU,8974 +sympy/utilities/_compilation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/utilities/_compilation/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/utilities/_compilation/tests/__pycache__/test_compilation.cpython-310.pyc,, +sympy/utilities/_compilation/tests/test_compilation.py,sha256=MORW8RsdmQTgFpYR7PLRQ35gxFYup3ejQu0byiIxmig,1735 +sympy/utilities/_compilation/util.py,sha256=3ZVUy732fHXFm6oK2EE13M-tztpG5G5vy4FcJ-V3SwY,7928 +sympy/utilities/autowrap.py,sha256=MNoV81PCxJvlk9_aG87jUpWkGhn03WCCk0SPG54nRoc,41123 +sympy/utilities/codegen.py,sha256=WbFTgzQPlCf-0O-gk8X-r9pxMnz4j8roObFsCThVl4Q,81495 +sympy/utilities/decorator.py,sha256=RTwHzeF1N9WMe6apBkYM2vaJcDoP683Ze548S3T_NN8,10925 +sympy/utilities/enumerative.py,sha256=pYpty2YDgvF5LBrmiAVyiqpiqhfFeYTfQfS7sTQMNks,43621 +sympy/utilities/exceptions.py,sha256=g9fgLCjrkuYk-ImX_V42ve2XIayK01mWmlXKOIVmW_8,10571 +sympy/utilities/iterables.py,sha256=VpGyggsMbqd2CL2TRSX1Iozp1G4VMIPNS7FMME-hPAw,90920 +sympy/utilities/lambdify.py,sha256=2DLVtqwhws_PAPVzxS5nh7YVfICAdGKxYGVNQ9p9mrg,55149 +sympy/utilities/magic.py,sha256=ofrwi1-xwMWb4VCQOEIwe4J1QAwxOscigDq26uSn3iY,400 +sympy/utilities/matchpy_connector.py,sha256=045re8zEDdr70Ey39OWRq0xnM6OsKBISiu9SB4nJ90g,10068 +sympy/utilities/mathml/__init__.py,sha256=3AG_eTJ4I7071riTqesIi1A3bykCeIUES2CTEYxfrPI,2299 +sympy/utilities/mathml/__pycache__/__init__.cpython-310.pyc,, +sympy/utilities/mathml/data/mmlctop.xsl,sha256=fi3CTNyg-mSscOGYBXLJv8veE_ItR_YTFMJ4jmjp6aE,114444 +sympy/utilities/mathml/data/mmltex.xsl,sha256=haX7emZOfD6_nbn5BjK93F-C85mSS8KogAbIBsW1aBA,137304 +sympy/utilities/mathml/data/simple_mmlctop.xsl,sha256=lhL-HXG_FfsJZhjeHbD7Ou8RnUaStI0-5VFcggsogjA,114432 +sympy/utilities/memoization.py,sha256=ZGOUUmwJCNRhHVZjTF4j65WjQ6VUoCeC1E8DkjryU00,1429 +sympy/utilities/misc.py,sha256=7N6LNt5N9eR2AK-_jmdOXXKhyhbW4kLRY8O5wYw3VgI,16007 +sympy/utilities/pkgdata.py,sha256=jt-hKL0xhxnDJDI9C2IXtH_QgYYtfq9fX9kJ3E7iang,1788 +sympy/utilities/pytest.py,sha256=F9TGNtoNvQUdlt5HYU084ITNmc7__7MBCSLLulBlM_Y,435 +sympy/utilities/randtest.py,sha256=aYUX_mgmQyfRdMjEOWaHM506CZ6WUK0eFuew0vFTwRs,430 +sympy/utilities/runtests.py,sha256=hYnDNiFNnDjQcXG04_3lzPFbUz6i0AUZ2rZ_RECVoDo,446 +sympy/utilities/source.py,sha256=ShIXRNtplSEfZNi5VDYD3yi6305eRz4TmchEOEvcicw,1127 +sympy/utilities/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/utilities/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_autowrap.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_codegen.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_codegen_julia.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_codegen_octave.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_codegen_rust.cpython-310.pyc,, 
+sympy/utilities/tests/__pycache__/test_decorator.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_deprecated.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_enumerative.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_exceptions.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_iterables.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_lambdify.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_matchpy_connector.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_mathml.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_misc.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_pickling.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_source.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_timeutils.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc,, +sympy/utilities/tests/__pycache__/test_xxe.cpython-310.pyc,, +sympy/utilities/tests/test_autowrap.py,sha256=NW20YQiJgEofZ0xr4Ggocix4fAsBmnyankmbxPf54Fk,14603 +sympy/utilities/tests/test_codegen.py,sha256=PLuSicBhnspClTiSeKCJgKd1NyU0qBkDRvQMrwm_gLc,55496 +sympy/utilities/tests/test_codegen_julia.py,sha256=kb3soJ1L7lTfZkYJKytfY_aKoHt6fkNjWhYblebzThw,18543 +sympy/utilities/tests/test_codegen_octave.py,sha256=_yd9uGKHZzwUFpderSa9E2cYqt8JMcEtBuN6U7_7bJ0,17833 +sympy/utilities/tests/test_codegen_rust.py,sha256=wJh6YmDfq8haGjJDniDaVUsDIKEj3rT_OB4r6uLI77Y,12323 +sympy/utilities/tests/test_decorator.py,sha256=VYUvzUrVI7I7MK0YZxLLEmEu4pV5dqaB1CLEJ8Ocav4,3705 +sympy/utilities/tests/test_deprecated.py,sha256=LRrZ2UxuXnK6Jwxl8vT0EdLT-q-7jLkTC69U9JjuYYU,489 +sympy/utilities/tests/test_enumerative.py,sha256=aUw6nbSzBp8h_pk35YZ_uzRncRoLYStblodeiDRFk6I,6089 +sympy/utilities/tests/test_exceptions.py,sha256=OKRa2yuHMtnVcnisu-xcaedi2RKsH9QrgU9exgoOK30,716 +sympy/utilities/tests/test_iterables.py,sha256=fPlgquV8GaZEIAjCwxE5DnXjGJUQlt6PGR7yj-gBLJ8,34905 +sympy/utilities/tests/test_lambdify.py,sha256=COnloXr7-MetPh-YonB1h6sEy5UkzBYWTdNuEGuduew,59594 +sympy/utilities/tests/test_matchpy_connector.py,sha256=dUfDfIdofKYufww29jV8mVQmglU1AnG2uEyREpNY7V0,4506 +sympy/utilities/tests/test_mathml.py,sha256=-6z1MRYEH4eYQi2_wt8zmdjwtt5Cn483zqsvD-o_r70,836 +sympy/utilities/tests/test_misc.py,sha256=TxjUNCosyCR5w1iJ6o77yKB4WBLyirVhOaALGYdkN9k,4726 +sympy/utilities/tests/test_pickling.py,sha256=JxsZSIVrXrscDwZ0Bvx4DkyLSEIyXUzoO96qrOx-5tU,23301 +sympy/utilities/tests/test_source.py,sha256=ObjrJxZFVhLgXjVmFHUy7bti9UPPgOh5Cptw8lHW9mM,289 +sympy/utilities/tests/test_timeutils.py,sha256=sCRC6BCSho1e9n4clke3QXHx4a3qYLru-bddS_sEmFA,337 +sympy/utilities/tests/test_wester.py,sha256=6_o3Dm4fT3R-TZEinuel2VFdZth0BOgPTPFYSEIcDX0,94546 +sympy/utilities/tests/test_xxe.py,sha256=xk1j0Dd96wsGYKRNDzXTW0hTQejGCfiZcEhYcYiqojg,66 +sympy/utilities/timeutils.py,sha256=DUtQYONkJnWjU2FvAbvxuRMkGmXpLMeaiOcH7R9Os9o,1968 +sympy/utilities/tmpfiles.py,sha256=yOjbs90sEtVc00YZyveyblT8zkwj4o70_RmuEKdKq_s,445 +sympy/vector/__init__.py,sha256=8a4cSQ1sJ5uirdMoHnV7SWXU3zJPKt_0ojona8C-p1Y,1909 +sympy/vector/__pycache__/__init__.cpython-310.pyc,, +sympy/vector/__pycache__/basisdependent.cpython-310.pyc,, +sympy/vector/__pycache__/coordsysrect.cpython-310.pyc,, +sympy/vector/__pycache__/deloperator.cpython-310.pyc,, +sympy/vector/__pycache__/dyadic.cpython-310.pyc,, +sympy/vector/__pycache__/functions.cpython-310.pyc,, +sympy/vector/__pycache__/implicitregion.cpython-310.pyc,, +sympy/vector/__pycache__/integrals.cpython-310.pyc,, +sympy/vector/__pycache__/operators.cpython-310.pyc,, 
+sympy/vector/__pycache__/orienters.cpython-310.pyc,, +sympy/vector/__pycache__/parametricregion.cpython-310.pyc,, +sympy/vector/__pycache__/point.cpython-310.pyc,, +sympy/vector/__pycache__/scalar.cpython-310.pyc,, +sympy/vector/__pycache__/vector.cpython-310.pyc,, +sympy/vector/basisdependent.py,sha256=BTTlFGRnZIvpvK_WEK4Tk_WZXEXYGosx9fWTuMO4M0o,11553 +sympy/vector/coordsysrect.py,sha256=1JV4GBgG99JKIWo2snYMMgIJCdob3XcwYqq9s8d6fA8,36859 +sympy/vector/deloperator.py,sha256=4BJNjmI342HkVRmeQkqauqvibKsf2HOuzknQTfQMkpg,3191 +sympy/vector/dyadic.py,sha256=IOyrgONyGDHPtG0RINcMgetAVMSOmYI5a99s-OwXBTA,8571 +sympy/vector/functions.py,sha256=auLfE1Su2kLtkRvlB_7Wol8O0_sqei1hojun3pkDRYI,15552 +sympy/vector/implicitregion.py,sha256=WrCIFuh_KZ6iEA7FZzYanZoUQuJ4gNBP3NeNKMxC0l0,16155 +sympy/vector/integrals.py,sha256=x8DrvKXPznE05JgnZ7I3IWLWrvFl9SEghGaFmHrBaE4,6837 +sympy/vector/operators.py,sha256=mI6d0eIxVcoDeH5PrhtPTzhxX_RXByX_4hjXeBTeq88,9521 +sympy/vector/orienters.py,sha256=EtWNWfOvAuy_wipam9SA7_muKSrsP-43UPRCCz56sb0,11798 +sympy/vector/parametricregion.py,sha256=3YyY0fkFNelR6ldi8XYRWpkFEvqY5-rFg_vT3NFute0,5932 +sympy/vector/point.py,sha256=ozYlInnlsmIpKBEr5Ui331T1lnAB5zS2_pHYh9k_eMs,4516 +sympy/vector/scalar.py,sha256=Z2f2wiK7BS73ctYTyNvn3gB74mXZuENpScLi_M1SpYg,1962 +sympy/vector/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/vector/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_coordsysrect.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_dyadic.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_field_functions.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_functions.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_implicitregion.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_integrals.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_operators.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_parametricregion.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_printing.cpython-310.pyc,, +sympy/vector/tests/__pycache__/test_vector.cpython-310.pyc,, +sympy/vector/tests/test_coordsysrect.py,sha256=q9n9OIG_CpD4KQN20dzwRZIXoMv7VSgp8fHmVnkZfr0,19595 +sympy/vector/tests/test_dyadic.py,sha256=f1R-BL_63VBbc0XgEX_LYzV_3OupYd4hp5RzRk6dAbI,4949 +sympy/vector/tests/test_field_functions.py,sha256=v9l8Ex8K2MsPGxqAPhpEgu6WAo6wS6qvdWLKQMxgE4A,14094 +sympy/vector/tests/test_functions.py,sha256=Bs2sekdDJyw_wrUpG7vZQGH0y0S4C4AbxGSpeU_8C2s,8050 +sympy/vector/tests/test_implicitregion.py,sha256=wVilD5H-MhHiW58QT6P5U7uT79JdKHm9D7JgZoi6BE4,4028 +sympy/vector/tests/test_integrals.py,sha256=BVRhrr_JeAsCKv_E-kA2jaXB8ZXTfj7nkNgT5o-XOJc,5093 +sympy/vector/tests/test_operators.py,sha256=KexUWvc_Nwp2HWrEbhxiO7MeaFxYlckrp__Tkwg-wmU,1613 +sympy/vector/tests/test_parametricregion.py,sha256=OfKapF9A_g9X6JxgYc0UfxIhwXzRERzaj-EijQCJONw,4009 +sympy/vector/tests/test_printing.py,sha256=3BeW55iQ4qXdfDTFqptE2ufJPJIBOzdfIYVx84n_EwA,7708 +sympy/vector/tests/test_vector.py,sha256=Mo88Jgmy3CuSQz25WSH34EnZSs_JBY7E-OKPO2SjhPc,7861 +sympy/vector/vector.py,sha256=pikmeLwkdW_6ed-Xo_U0_a2Om5TGSlfE4PijkRsJllc,17911 diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1f37c02f2eb2e26b306202feaccb31e522b8b169 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: 
py3-none-any + diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..42a12f960335556fcee728e5754c346447d4e89c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +isympy = isympy:main diff --git a/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..0aa85c253508b68bb075f556be3c3f76dc4467ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy-1.12.dist-info/top_level.txt @@ -0,0 +1,2 @@ +isympy +sympy
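
The RECORD entries above follow the wheel manifest format (PEP 376/427): each row pairs an installed path with an unpadded urlsafe-base64 sha256 digest and a byte size, while `__pycache__/*.pyc` rows carry neither. A minimal sketch of checking such a manifest against disk — `verify_record` and the example paths are illustrative assumptions, not a tool shipped with sympy or pip:

```python
import base64
import csv
import hashlib
from pathlib import Path


def verify_record(site_packages: str, record_file: str) -> list[str]:
    """Return RECORD paths whose on-disk hash or size disagrees.

    Illustrative sketch only: RECORD is CSV with rows of
    (path, "sha256=<unpadded urlsafe b64>", size); unhashed rows
    (e.g. *.pyc and RECORD itself) are skipped.
    """
    root = Path(site_packages)
    mismatched = []
    with open(record_file, newline="") as fh:
        for row in csv.reader(fh):
            if len(row) != 3:
                continue  # tolerate blank lines
            path, digest, size = row
            if not digest:
                continue  # entries like "...cpython-310.pyc,," carry no hash
            data = (root / path).read_bytes()
            algo, _, expected = digest.partition("=")
            # PEP 427 digests are urlsafe base64 with '=' padding stripped
            actual = base64.urlsafe_b64encode(
                hashlib.new(algo, data).digest()
            ).rstrip(b"=").decode("ascii")
            if actual != expected or (size and int(size) != len(data)):
                mismatched.append(path)
    return mismatched


# Hypothetical usage against the tree added in this diff:
# verify_record(
#     "venv/lib/python3.10/site-packages",
#     "venv/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD",
# )
```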