diff --git a/ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..90816d1bb67bf95b82f2a34019c14685d9ed319d --- /dev/null +++ b/ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27ab1b2364482a2dccb2c681419e049b24b6b5dab31fe0058dc3ebd7c0fadd53 +size 33555612 diff --git a/ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9932833f7fbc9a2a7b383cee4eb6d934ac5a46d7 --- /dev/null +++ b/ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3558402125a35be5f7b5d60934d25ed4debddf5a910656b549142a877fd513dc +size 33555533 diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cd16dbb6805161a7dfc6a71b602442be3ea6eb4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h new file mode 100644 index 0000000000000000000000000000000000000000..c6f039db8effce996015f901562009ebe976d832 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h @@ -0,0 +1,588 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CHANNEL_DESCRIPTOR_H__) +#define __CHANNEL_DESCRIPTOR_H__ + +#if defined(__cplusplus) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +/** + * \addtogroup CUDART_HIGHLEVEL + * + * @{ + */ + +/** + * \brief \hl Returns a channel descriptor using the specified format + * + * Returns a channel descriptor with format \p f and number of bits of each + * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is + * defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + * \endcode + * + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat, + * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2, + * ::cudaChannelFormatKindSignedNormalized8X4, + * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2, + * ::cudaChannelFormatKindUnsignedNormalized8X4, + * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2, + * ::cudaChannelFormatKindSignedNormalized16X4, + * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2, + * ::cudaChannelFormatKindUnsignedNormalized16X4 + * or ::cudaChannelFormatKindNV12. + * + * The format is specified by the template specialization. + * + * The template function specializes for the following scalar types: + * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float. 
+ * The template function specializes for the following vector types:
+ * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}.
+ * The template function specializes for following cudaChannelFormatKind enum values:
+ * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12.
+ *
+ * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone
+ *
+ * \return
+ * Channel descriptor with format \p f
+ *
+ * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)",
+ * ::cudaGetChannelDesc,
+ */
+template<class T> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
+{
+    return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void)
+{
+    int e = (int)sizeof(unsigned short) * 8;
+
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void)
+{
+    int e = (int)sizeof(unsigned short) * 8;
+
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void)
+{
+    int e = (int)sizeof(unsigned short) * 8;
+
+    return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void)
+{
+    int e = (int)sizeof(unsigned short) * 8;
+
+    return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char>(void)
+{
+    int e = (int)sizeof(char) * 8;
+
+#if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__)
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+#else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+#endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<signed char>(void)
+{
+    int e = (int)sizeof(signed char) * 8;
+
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned char>(void)
+{
+    int e = (int)sizeof(unsigned char) * 8;
+
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char1>(void)
+{
+    int e = (int)sizeof(signed char) * 8;
+
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar1>(void)
+{
+    int e = (int)sizeof(unsigned char) * 8;
+
+    return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char2>(void)
+{
+    int e = (int)sizeof(signed char) * 8;
+
+    return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar2>(void)
+{
+    int e = (int)sizeof(unsigned char) * 8;
+
+    return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char4>(void)
+{
+    int e = (int)sizeof(signed char) * 8;
+
+    return cudaCreateChannelDesc(e, e, e,
e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +#if !defined(__LP64__) + +template<> __inline__ 
__host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +#endif /* !__LP64__ */ + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void) +{ + int e = (int)sizeof(char) * 8; + + return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12); +} + +template __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone); +} + +/* Signed 8-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4); +} + +/* Unsigned 8-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, 
cudaChannelFormatKindUnsignedNormalized8X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4); +} + +/* Signed 16-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4); +} + +/* Unsigned 16-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4); +} + +/* NV12 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12); +} + +/* BC1 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1); +} + +/* BC1sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB); +} + +/* BC2 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2); +} + +/* BC2sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB); +} + +/* BC3 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3); +} + +/* BC3sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB); +} + +/* BC4 unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4); +} + +/* BC4 signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4); +} + +/* BC5 unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, 
cudaChannelFormatKindUnsignedBlockCompressed5); +} + +/* BC5 signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5); +} + +/* BC6H unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H); +} + +/* BC6H signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H); +} + +/* BC7 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7); +} + +/* BC7sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB); +} + +#endif /* __cplusplus */ + +/** @} */ +/** @} */ /* END CUDART_TEXTURE_HL */ + +#endif /* !__CHANNEL_DESCRIPTOR_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h new file mode 100644 index 0000000000000000000000000000000000000000..942f22dab54701141c9ce5e2a055c957824ca5b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h @@ -0,0 +1,1690 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _COOPERATIVE_GROUPS_H_
+#define _COOPERATIVE_GROUPS_H_
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#include "cooperative_groups/details/info.h"
+#include "cooperative_groups/details/driver_abi.h"
+#include "cooperative_groups/details/helpers.h"
+#include "cooperative_groups/details/memory.h"
+
+#if defined(_CG_HAS_STL_ATOMICS)
+#include <cuda/atomic>
+#define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
+#else
+#define _CG_THREAD_SCOPE(scope)
+#endif
+
+_CG_BEGIN_NAMESPACE
+
+namespace details {
+    _CG_CONST_DECL unsigned int coalesced_group_id = 1;
+    _CG_CONST_DECL unsigned int multi_grid_group_id = 2;
+    _CG_CONST_DECL unsigned int grid_group_id = 3;
+    _CG_CONST_DECL unsigned int thread_block_id = 4;
+    _CG_CONST_DECL unsigned int multi_tile_group_id = 5;
+    _CG_CONST_DECL unsigned int cluster_group_id = 6;
+}
+
+/**
+ * class thread_group;
+ *
+ * Generic thread group type, into which all groups are convertible.
+ * It acts as a container for all storage necessary for the derived groups,
+ * and will dispatch the API calls to the correct derived group. This means
+ * that all derived groups must implement the same interface as thread_group.
+ */ +class thread_group +{ +protected: + struct group_data { + unsigned int _unused : 1; + unsigned int type : 7, : 0; + }; + + struct gg_data { + details::grid_workspace *gridWs; + }; + +#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + struct mg_data { + unsigned long long _unused : 1; + unsigned long long type : 7; + unsigned long long handle : 56; + const details::multi_grid::multi_grid_functions *functions; + }; +#endif + + struct tg_data { + unsigned int is_tiled : 1; + unsigned int type : 7; + unsigned int size : 24; + // packed to 4b + unsigned int metaGroupSize : 16; + unsigned int metaGroupRank : 16; + // packed to 8b + unsigned int mask; + // packed to 12b + unsigned int _res; + }; + + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend class thread_block; + + union __align__(8) { + group_data group; + tg_data coalesced; + gg_data grid; +#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + mg_data multi_grid; +#endif + } _data; + + _CG_QUALIFIER thread_group operator=(const thread_group& src); + + _CG_QUALIFIER thread_group(unsigned int type) { + _data.group.type = type; + _data.group._unused = false; + } + +#ifdef _CG_CPP11_FEATURES + static_assert(sizeof(tg_data) <= 16, "Failed size check"); + static_assert(sizeof(gg_data) <= 16, "Failed size check"); +# ifdef _CG_ABI_EXPERIMENTAL + static_assert(sizeof(mg_data) <= 16, "Failed size check"); +# endif +#endif + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device) + + _CG_QUALIFIER unsigned long long size() const; + _CG_QUALIFIER unsigned long long num_threads() const; + _CG_QUALIFIER unsigned long long thread_rank() const; + _CG_QUALIFIER void sync() const; + _CG_QUALIFIER unsigned int get_type() const { + return _data.group.type; + } + +}; + +template +struct thread_group_base : public thread_group { + _CG_QUALIFIER thread_group_base() : thread_group(TyId) {} + _CG_STATIC_CONST_DECL unsigned int id = TyId; +}; + +#if defined(_CG_HAS_MULTI_GRID_GROUP) + +/** + * class multi_grid_group; + * + * Threads within this this group are guaranteed to be co-resident on the + * same system, on multiple devices within the same launched kernels. + * To use this group, the kernel must have been launched with + * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent), + * and the device must support it (queryable device attribute). 
+ * + * Constructed via this_multi_grid(); + */ + + +# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) +class multi_grid_group; + +// Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls +template +__device__ _CG_DEPRECATED multi_grid_group this_multi_grid(); + +class multi_grid_group : public thread_group_base +{ +private: + template + _CG_QUALIFIER multi_grid_group() { + _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics(); + _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle(); + } + + friend multi_grid_group this_multi_grid(); + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system) + + _CG_QUALIFIER bool is_valid() const { + return (_data.multi_grid.handle != 0); + } + + _CG_QUALIFIER void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + _data.multi_grid.functions->sync(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned long long num_threads() const { + _CG_ASSERT(is_valid()); + return _data.multi_grid.functions->size(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned long long size() const { + return num_threads(); + } + + _CG_QUALIFIER unsigned long long thread_rank() const { + _CG_ASSERT(is_valid()); + return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned int grid_rank() const { + _CG_ASSERT(is_valid()); + return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle)); + } + + _CG_QUALIFIER unsigned int num_grids() const { + _CG_ASSERT(is_valid()); + return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle)); + } +}; +# else +class multi_grid_group +{ +private: + unsigned long long _handle; + unsigned int _size; + unsigned int _rank; + + friend _CG_QUALIFIER multi_grid_group this_multi_grid(); + + _CG_QUALIFIER multi_grid_group() { + _handle = details::multi_grid::get_intrinsic_handle(); + _size = details::multi_grid::size(_handle); + _rank = details::multi_grid::thread_rank(_handle); + } + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system) + + _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const { + return (_handle != 0); + } + + _CG_QUALIFIER _CG_DEPRECATED void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + details::multi_grid::sync(_handle); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const { + _CG_ASSERT(is_valid()); + return _size; + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const { + return num_threads(); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const { + _CG_ASSERT(is_valid()); + return _rank; + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const { + _CG_ASSERT(is_valid()); + return (details::multi_grid::grid_rank(_handle)); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const { + _CG_ASSERT(is_valid()); + return (details::multi_grid::num_grids(_handle)); + } +}; +# endif + +/** + * multi_grid_group this_multi_grid() + * + * Constructs a multi_grid_group + */ +# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) +template +__device__ +#else +_CG_QUALIFIER +# endif +_CG_DEPRECATED +multi_grid_group this_multi_grid() +{ + return multi_grid_group(); +} +#endif + +/** + * class grid_group; + * + * Threads within this this group are guaranteed to be co-resident on the + * same device within the same launched kernel. 
To use this group, the kernel
+ * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
+ * and the device must support it (queryable device attribute).
+ *
+ * Constructed via this_grid();
+ */
+class grid_group : public thread_group_base<details::grid_group_id>
+{
+    _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
+    friend _CG_QUALIFIER grid_group this_grid();
+
+private:
+    _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
+        _data.grid.gridWs = gridWs;
+    }
+
+ public:
+    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
+
+    _CG_QUALIFIER bool is_valid() const {
+        return (_data.grid.gridWs != NULL);
+    }
+
+    _CG_QUALIFIER void sync() const {
+        if (!is_valid()) {
+            _CG_ABORT();
+        }
+        details::grid::sync(&_data.grid.gridWs->barrier);
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long size() {
+        return details::grid::size();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
+        return details::grid::thread_rank();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 group_dim() {
+        return details::grid::grid_dim();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long num_threads() {
+        return details::grid::num_threads();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 dim_blocks() {
+        return details::grid::dim_blocks();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
+        return details::grid::num_blocks();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 block_index() {
+        return details::grid::block_index();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long block_rank() {
+        return details::grid::block_rank();
+    }
+
+# if defined(_CG_HAS_CLUSTER_GROUP)
+    _CG_STATIC_QUALIFIER dim3 dim_clusters() {
+        return details::grid::dim_clusters();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
+        return details::grid::num_clusters();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 cluster_index() {
+        return details::grid::cluster_index();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
+        return details::grid::cluster_rank();
+    }
+# endif
+};
+
+_CG_QUALIFIER grid_group this_grid() {
+    // Load a workspace from the driver
+    grid_group gg(details::get_grid_workspace());
+#ifdef _CG_DEBUG
+    // *all* threads must be available to synchronize
+    gg.sync();
+#endif // _CG_DEBUG
+    return gg;
+}
+
+#if defined(_CG_HAS_CLUSTER_GROUP)
+/**
+ * class cluster_group
+ *
+ * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
+ * divided along all dimensions to form groups of blocks, each group of which is
+ * a block cluster. Clustered grids are subject to various restrictions and
+ * limitations. Primarily, a cluster consists of at most 8 blocks by default
+ * (although the user is allowed to opt-in to non-standard sizes,) and clustered
+ * grids are subject to additional occupancy limitations due to per-cluster
+ * hardware resource consumption. In exchange, a block cluster is guaranteed to
+ * be a cooperative group, with access to all cooperative group capabilities, as
+ * well as cluster specific capabilities and accelerations. A cluster_group
+ * represents a block cluster.
+ *
+ * Constructed via this_cluster_group();
+ */
+class cluster_group : public thread_group_base<details::cluster_group_id>
+{
+    // Friends
+    friend _CG_QUALIFIER cluster_group this_cluster();
+
+    // Disable constructor
+    _CG_QUALIFIER cluster_group()
+    {
+    }
+
+ public:
+    //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster)
+
+    using arrival_token = struct {};
+
+    // Functionality exposed by the group
+    _CG_STATIC_QUALIFIER void sync()
+    {
+        return details::cluster::sync();
+    }
+
+    _CG_STATIC_QUALIFIER arrival_token barrier_arrive()
+    {
+        details::cluster::barrier_arrive();
+        return arrival_token();
+    }
+
+    _CG_STATIC_QUALIFIER void barrier_wait()
+    {
+        return details::cluster::barrier_wait();
+    }
+
+    _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&)
+    {
+        return details::cluster::barrier_wait();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
+    {
+        return details::cluster::query_shared_rank(addr);
+    }
+
+    template <typename T>
+    _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
+    {
+        return details::cluster::map_shared_rank(addr, rank);
+    }
+
+    _CG_STATIC_QUALIFIER dim3 block_index()
+    {
+        return details::cluster::block_index();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned int block_rank()
+    {
+        return details::cluster::block_rank();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned int thread_rank()
+    {
+        return details::cluster::thread_rank();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 dim_blocks()
+    {
+        return details::cluster::dim_blocks();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned int num_blocks()
+    {
+        return details::cluster::num_blocks();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 dim_threads()
+    {
+        return details::cluster::dim_threads();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned int num_threads()
+    {
+        return details::cluster::num_threads();
+    }
+
+    // Legacy aliases
+    _CG_STATIC_QUALIFIER unsigned int size()
+    {
+        return num_threads();
+    }
+};
+
+/*
+ * cluster_group this_cluster()
+ *
+ * Constructs a cluster_group
+ */
+_CG_QUALIFIER cluster_group this_cluster()
+{
+    cluster_group cg;
+#ifdef _CG_DEBUG
+    cg.sync();
+#endif
+    return cg;
+}
+#endif
+
+#if defined(_CG_CPP11_FEATURES)
+class thread_block;
+template <unsigned int MaxBlockSize>
+_CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
+#endif
+
+/**
+ * class thread_block
+ *
+ * Every GPU kernel is executed by a grid of thread blocks, and threads within
+ * each block are guaranteed to reside on the same streaming multiprocessor.
+ * A thread_block represents a thread block whose dimensions are not known until runtime.
+ * + * Constructed via this_thread_block(); + */ +class thread_block : public thread_group_base +{ + // Friends + friend _CG_QUALIFIER thread_block this_thread_block(); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz); + +#if defined(_CG_CPP11_FEATURES) + template + friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch); + template + friend class __static_size_multi_warp_tile_base; + + details::multi_warp_scratch* const tile_memory; + + template + _CG_QUALIFIER thread_block(block_tile_memory& scratch) : + tile_memory(details::get_scratch_ptr(&scratch)) { +#ifdef _CG_DEBUG + if (num_threads() > MaxBlockSize) { + details::abort(); + } +#endif +#if !defined(_CG_HAS_RESERVED_SHARED) + tile_memory->init_barriers(thread_rank()); + sync(); +#endif + } +#endif + + // Disable constructor + _CG_QUALIFIER thread_block() +#if defined(_CG_CPP11_FEATURES) + : tile_memory(details::get_scratch_ptr(NULL)) +#endif + { } + + // Internal Use + _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const { + const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0); + + // Invalid, immediately fail + if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) { + details::abort(); + return (thread_block()); + } + + unsigned int mask; + unsigned int base_offset = thread_rank() & (~(tilesz - 1)); + unsigned int masklength = min((unsigned int)size() - base_offset, tilesz); + + mask = (unsigned int)(-1) >> (32 - masklength); + mask <<= (details::laneid() & ~(tilesz - 1)); + thread_group tile = thread_group(details::coalesced_group_id); + tile._data.coalesced.mask = mask; + tile._data.coalesced.size = __popc(mask); + tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz; + tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz; + tile._data.coalesced.is_tiled = true; + return (tile); + } + + public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id; + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + _CG_STATIC_QUALIFIER void sync() { + details::cta::sync(); + } + + _CG_STATIC_QUALIFIER unsigned int size() { + return details::cta::size(); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() { + return details::cta::thread_rank(); + } + + // Additional functionality exposed by the group + _CG_STATIC_QUALIFIER dim3 group_index() { + return details::cta::group_index(); + } + + _CG_STATIC_QUALIFIER dim3 thread_index() { + return details::cta::thread_index(); + } + + _CG_STATIC_QUALIFIER dim3 group_dim() { + return details::cta::block_dim(); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() { + return details::cta::dim_threads(); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() { + return details::cta::num_threads(); + } + +}; + +/** + * thread_block this_thread_block() + * + * Constructs a thread_block group + */ +_CG_QUALIFIER thread_block this_thread_block() +{ + return (thread_block()); +} + +#if defined(_CG_CPP11_FEATURES) +template +_CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch) { + return (thread_block(scratch)); +} +#endif + +/** + * class coalesced_group + * + * A group representing the current set of converged threads in a warp. + * The size of the group is not guaranteed and it may return a group of + * only one thread (itself). + * + * This group exposes warp-synchronous builtins. 
+ * Constructed via coalesced_threads(); + */ +class coalesced_group : public thread_group_base +{ +private: + friend _CG_QUALIFIER coalesced_group coalesced_threads(); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz); + friend class details::_coalesced_group_data_access; + + _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const { + unsigned int member_pack = 0; + unsigned int member_rank = 0; + for (int bit_idx = 0; bit_idx < 32; bit_idx++) { + unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx); + if (lane_bit) { + if (laneMask & lane_bit) + member_pack |= 1 << member_rank; + member_rank++; + } + } + return (member_pack); + } + + // Internal Use + _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const { + const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0); + + // Invalid, immediately fail + if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) { + details::abort(); + return (coalesced_group(0)); + } + if (size() <= tilesz) { + return (*this); + } + + if ((_data.coalesced.is_tiled == true) && pow2_tilesz) { + unsigned int base_offset = (thread_rank() & (~(tilesz - 1))); + unsigned int masklength = min((unsigned int)size() - base_offset, tilesz); + unsigned int mask = (unsigned int)(-1) >> (32 - masklength); + + mask <<= (details::laneid() & ~(tilesz - 1)); + coalesced_group coalesced_tile = coalesced_group(mask); + coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz; + coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz; + coalesced_tile._data.coalesced.is_tiled = true; + return (coalesced_tile); + } + else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) { + unsigned int mask = 0; + unsigned int member_rank = 0; + int seen_lanes = (thread_rank() / tilesz) * tilesz; + for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) { + unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx); + if (lane_bit) { + if (seen_lanes <= 0 && member_rank < tilesz) { + mask |= lane_bit; + member_rank++; + } + seen_lanes--; + } + } + coalesced_group coalesced_tile = coalesced_group(mask); + // Override parent with the size of this group + coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz; + coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz; + return coalesced_tile; + } + else { + // None in _CG_VERSION 1000 + details::abort(); + } + + return (coalesced_group(0)); + } + + protected: + _CG_QUALIFIER coalesced_group(unsigned int mask) { + _data.coalesced.mask = mask; + _data.coalesced.size = __popc(mask); + _data.coalesced.metaGroupRank = 0; + _data.coalesced.metaGroupSize = 1; + _data.coalesced.is_tiled = false; + } + + _CG_QUALIFIER unsigned int get_mask() const { + return (_data.coalesced.mask); + } + + public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id; + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + _CG_QUALIFIER unsigned int num_threads() const { + return _data.coalesced.size; + } + + _CG_QUALIFIER unsigned int size() const { + return num_threads(); + } + + _CG_QUALIFIER unsigned int thread_rank() const { + return (__popc(_data.coalesced.mask & details::lanemask32_lt())); + } + + // Rank of this group in the upper level of the hierarchy + _CG_QUALIFIER unsigned int meta_group_rank() const { + return _data.coalesced.metaGroupRank; + } + + // Total num partitions created 
out of all CTAs when the group was created + _CG_QUALIFIER unsigned int meta_group_size() const { + return _data.coalesced.metaGroupSize; + } + + _CG_QUALIFIER void sync() const { + __syncwarp(_data.coalesced.mask); + } + +#ifdef _CG_CPP11_FEATURES + template > + _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const { + unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 : + (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1)); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } + + template > + _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const { + if (size() == 32) { + return details::tile::shuffle_dispatch::shfl_down( + _CG_STL_NAMESPACE::forward(elem), 0xFFFFFFFF, delta, 32); + } + + unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1); + + if (lane >= 32) + lane = details::laneid(); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } + + template > + _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const { + if (size() == 32) { + return details::tile::shuffle_dispatch::shfl_up( + _CG_STL_NAMESPACE::forward(elem), 0xFFFFFFFF, delta, 32); + } + + unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1)); + if (lane >= 32) + lane = details::laneid(); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } +#else + template + _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const { + details::assert_if_not_arithmetic(); + unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 : + (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1)); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } + + template + _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32)); + } + unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1)); + if (lane >= 32) lane = details::laneid(); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } + + template + _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32)); + } + unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1); + if (lane >= 32) lane = details::laneid(); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } +#endif + + _CG_QUALIFIER int any(int predicate) const { + return (__ballot_sync(_data.coalesced.mask, predicate) != 0); + } + _CG_QUALIFIER int all(int predicate) const { + return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask); + } + _CG_QUALIFIER unsigned int ballot(int predicate) const { + if (size() == 32) { + return (__ballot_sync(0xFFFFFFFF, predicate)); + } + unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate); + return (_packLanes(lane_ballot)); + } + +#ifdef _CG_HAS_MATCH_COLLECTIVE + + template + _CG_QUALIFIER unsigned int match_any(TyIntegral val) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__match_any_sync(0xFFFFFFFF, val)); + } + unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val); + return (_packLanes(lane_match)); + } + + template + 
_CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__match_all_sync(0xFFFFFFFF, val, &pred)); + } + unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred); + return (_packLanes(lane_match)); + } + +#endif /* !_CG_HAS_MATCH_COLLECTIVE */ + +}; + +_CG_QUALIFIER coalesced_group coalesced_threads() +{ + return (coalesced_group(__activemask())); +} + +namespace details { + template struct verify_thread_block_tile_size; + template <> struct verify_thread_block_tile_size<32> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<16> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<8> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<4> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<2> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<1> { typedef void OK; }; + +#ifdef _CG_CPP11_FEATURES + template + using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant; + + template + using _is_single_warp = _CG_STL_NAMESPACE::integral_constant; + template + using _is_multi_warp = + _CG_STL_NAMESPACE::integral_constant 32) && (Size <= 1024)>; + + template + using _is_valid_single_warp_tile = + _CG_STL_NAMESPACE::integral_constant::value && _is_single_warp::value>; + template + using _is_valid_multi_warp_tile = + _CG_STL_NAMESPACE::integral_constant::value && _is_multi_warp::value>; +#else + template + struct _is_multi_warp { + static const bool value = false; + }; +#endif +} + +template +class __static_size_tile_base +{ +protected: + _CG_STATIC_CONST_DECL unsigned int numThreads = Size; + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + // Rank of thread within tile + _CG_STATIC_QUALIFIER unsigned int thread_rank() { + return (details::cta::thread_rank() & (numThreads - 1)); + } + + // Number of threads within tile + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() { + return numThreads; + } + + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() { + return num_threads(); + } +}; + +template +class __static_size_thread_block_tile_base : public __static_size_tile_base +{ + friend class details::_coalesced_group_data_access; + typedef details::tile::tile_helpers th; + +#ifdef _CG_CPP11_FEATURES + static_assert(details::_is_valid_single_warp_tile::value, "Size must be one of 1/2/4/8/16/32"); +#else + typedef typename details::verify_thread_block_tile_size::OK valid; +#endif + using __static_size_tile_base::numThreads; + _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF; + + protected: + _CG_STATIC_QUALIFIER unsigned int build_mask() { + unsigned int mask = fullMask; + if (numThreads != 32) { + // [0,31] representing the current active thread in the warp + unsigned int laneId = details::laneid(); + // shift mask according to the partition it belongs to + mask = th::tileMask << (laneId & ~(th::laneMask)); + } + return (mask); + } + +public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id; + + _CG_STATIC_QUALIFIER void sync() { + __syncwarp(build_mask()); + } + +#ifdef _CG_CPP11_FEATURES + // PTX supported collectives + template > + _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const { + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), build_mask(), srcRank, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const { + return 
details::tile::shuffle_dispatch::shfl_down( + _CG_STL_NAMESPACE::forward(elem), build_mask(), delta, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const { + return details::tile::shuffle_dispatch::shfl_up( + _CG_STL_NAMESPACE::forward(elem), build_mask(), delta, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const { + return details::tile::shuffle_dispatch::shfl_xor( + _CG_STL_NAMESPACE::forward(elem), build_mask(), laneMask, numThreads); + } +#else + template + _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const { + details::assert_if_not_arithmetic(); + return (__shfl_sync(build_mask(), var, srcRank, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const { + details::assert_if_not_arithmetic(); + return (__shfl_down_sync(build_mask(), var, delta, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const { + details::assert_if_not_arithmetic(); + return (__shfl_up_sync(build_mask(), var, delta, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const { + details::assert_if_not_arithmetic(); + return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads)); + } +#endif //_CG_CPP11_FEATURES + + _CG_QUALIFIER int any(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot != 0); + } + _CG_QUALIFIER int all(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot == build_mask()); + } + _CG_QUALIFIER unsigned int ballot(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot >> (details::laneid() & (~(th::laneMask)))); + } + +#ifdef _CG_HAS_MATCH_COLLECTIVE + template + _CG_QUALIFIER unsigned int match_any(TyIntegral val) const { + details::assert_if_not_arithmetic(); + unsigned int lane_match = __match_any_sync(build_mask(), val); + return (lane_match >> (details::laneid() & (~(th::laneMask)))); + } + + template + _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const { + details::assert_if_not_arithmetic(); + unsigned int lane_match = __match_all_sync(build_mask(), val, &pred); + return (lane_match >> (details::laneid() & (~(th::laneMask)))); + } +#endif + +}; + +template +class __static_parent_thread_block_tile_base +{ +public: + // Rank of this group in the upper level of the hierarchy + _CG_STATIC_QUALIFIER unsigned int meta_group_rank() { + return ParentT::thread_rank() / Size; + } + + // Total num partitions created out of all CTAs when the group was created + _CG_STATIC_QUALIFIER unsigned int meta_group_size() { + return (ParentT::size() + Size - 1) / Size; + } +}; + +/** + * class thread_block_tile + * + * Statically-sized group type, representing one tile of a thread block. + * The only specializations currently supported are those with native + * hardware support (1/2/4/8/16/32) + * + * This group exposes warp-synchronous builtins. 
+ * Can only be constructed via tiled_partition(ParentT&) + */ + +template +class __single_warp_thread_block_tile : + public __static_size_thread_block_tile_base, + public __static_parent_thread_block_tile_base +{ + typedef __static_parent_thread_block_tile_base staticParentBaseT; + friend class details::_coalesced_group_data_access; + +protected: + _CG_QUALIFIER __single_warp_thread_block_tile() { }; + _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { }; + + _CG_STATIC_QUALIFIER unsigned int get_mask() { + return __static_size_thread_block_tile_base::build_mask(); + } +}; + +template +class __single_warp_thread_block_tile : + public __static_size_thread_block_tile_base, + public thread_group_base +{ + _CG_STATIC_CONST_DECL unsigned int numThreads = Size; + + template friend class __single_warp_thread_block_tile; + friend class details::_coalesced_group_data_access; + + typedef __static_size_thread_block_tile_base staticSizeBaseT; + +protected: + _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) { + _data.coalesced.mask = staticSizeBaseT::build_mask(); + _data.coalesced.size = numThreads; + _data.coalesced.metaGroupRank = meta_group_rank; + _data.coalesced.metaGroupSize = meta_group_size; + _data.coalesced.is_tiled = true; + } + + _CG_QUALIFIER unsigned int get_mask() const { + return (_data.coalesced.mask); + } + +public: + using staticSizeBaseT::sync; + using staticSizeBaseT::size; + using staticSizeBaseT::num_threads; + using staticSizeBaseT::thread_rank; + + _CG_QUALIFIER unsigned int meta_group_rank() const { + return _data.coalesced.metaGroupRank; + } + + _CG_QUALIFIER unsigned int meta_group_size() const { + return _data.coalesced.metaGroupSize; + } +}; + +/** + * Outer level API calls + * void sync(GroupT) - see .sync() + * void thread_rank(GroupT) - see .thread_rank() + * void group_size(GroupT) - see .size() + */ +template +_CG_QUALIFIER void sync(GroupT const &g) +{ + g.sync(); +} + +// TODO: Use a static dispatch to determine appropriate return type +// C++03 is stuck with unsigned long long for now +#ifdef _CG_CPP11_FEATURES +template +_CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) { + return g.thread_rank(); +} + + +template +_CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) { + return g.num_threads(); +} +#else +template +_CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) { + return static_cast(g.thread_rank()); +} + + +template +_CG_QUALIFIER unsigned long long group_size(GroupT const &g) { + return static_cast(g.num_threads()); +} +#endif + + +/** + * tiled_partition + * + * The tiled_partition(parent, tilesz) method is a collective operation that + * partitions the parent group into a one-dimensional, row-major, tiling of subgroups. + * + * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will + * be created where threads having identical k = (thread_rank(parent)/tilesz) + * will be members of the same subgroup. + * + * The implementation may cause the calling thread to wait until all the members + * of the parent group have invoked the operation before resuming execution. + * + * Functionality is limited to power-of-two sized subgorup instances of at most + * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be + * tiled_partition() in _CG_VERSION 1000. 
+ */ +_CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz) +{ + if (parent.get_type() == details::coalesced_group_id) { + const coalesced_group *_cg = static_cast(&parent); + return _cg->_get_tiled_threads(tilesz); + } + else { + const thread_block *_tb = static_cast(&parent); + return _tb->_get_tiled_threads(tilesz); + } +} + +// Thread block type overload: returns a basic thread_group for now (may be specialized later) +_CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz) +{ + return (parent._get_tiled_threads(tilesz)); +} + +// Coalesced group type overload: retains its ability to stay coalesced +_CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz) +{ + return (parent._get_tiled_threads(tilesz)); +} + +namespace details { + template + class internal_thread_block_tile : public __single_warp_thread_block_tile {}; + + template + _CG_QUALIFIER internal_thread_block_tile tiled_partition_internal() { + return internal_thread_block_tile(); + } + + template + _CG_QUALIFIER TyVal multi_warp_collectives_helper( + const GroupT& group, + WarpLambda warp_lambda, + InterWarpLambda inter_warp_lambda) { + return group.template collectives_scheme(warp_lambda, inter_warp_lambda); + } + + template + _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) { + return group.template get_scratch_location(warp_id); + } + + template + _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) { + return group.get_sync_location(); + } + +} +/** + * tiled_partition + * + * The tiled_partition(parent) method is a collective operation that + * partitions the parent group into a one-dimensional, row-major, tiling of subgroups. + * + * A total of ((size(parent)/tilesz) subgroups will be created, + * therefore the parent group size must be evenly divisible by the tilesz. + * The allow parent groups are thread_block or thread_block_tile. + * + * The implementation may cause the calling thread to wait until all the members + * of the parent group have invoked the operation before resuming execution. + * + * Functionality is limited to native hardware sizes, 1/2/4/8/16/32. + * The size(parent) must be greater than the template Size parameter + * otherwise the results are undefined. 
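+ *
+ * For illustration only, a minimal sketch of the templated form (the kernel
+ * name `k` and the `cg` alias are placeholders):
+ * \code
+ * #include <cooperative_groups.h>
+ * namespace cg = cooperative_groups;
+ *
+ * __global__ void k() {
+ *     cg::thread_block block = cg::this_thread_block();
+ *     cg::thread_block_tile<16> tile = cg::tiled_partition<16>(block);
+ *     unsigned int which = tile.meta_group_rank();  // index of this tile within the block
+ *     unsigned int count = tile.meta_group_size();  // number of 16-thread tiles created
+ *     tile.sync();
+ * }
+ * \endcode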
+ */ + +#if defined(_CG_CPP11_FEATURES) +template +class __static_size_multi_warp_tile_base : public __static_size_tile_base +{ + static_assert(details::_is_valid_multi_warp_tile::value, "Size must be one of 64/128/256/512"); + + template + friend __device__ TyVal details::multi_warp_collectives_helper( + const GroupT& group, + WarpLambda warp_lambda, + InterWarpLambda inter_warp_lambda); + template + friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id); + template + friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group); + template + friend class __static_size_multi_warp_tile_base; + using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base>; + using ThisType = __static_size_multi_warp_tile_base; + _CG_STATIC_CONST_DECL int numWarps = Size / 32; + +protected: + details::multi_warp_scratch* const tile_memory; + + template + _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) { +#if defined(_CG_HAS_RESERVED_SHARED) + details::sync_warps_reset(get_sync_location(), details::cta::thread_rank()); + g.sync(); +#endif + } + + +private: + _CG_QUALIFIER details::barrier_t* get_sync_location() const { + // Different group sizes use different barriers, all groups of a given size share one barrier. + unsigned int sync_id = details::log2(Size / 64); + return &tile_memory->barriers[sync_id]; + } + + template + _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const { + unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id; + return reinterpret_cast(&tile_memory->communication_memory[scratch_id]); + } + + template + _CG_QUALIFIER T* get_scratch_location() const { + unsigned int scratch_id = details::cta::thread_rank() / 32; + return reinterpret_cast(&tile_memory->communication_memory[scratch_id]); + } + + template + _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const { + unsigned int src_warp = src / 32; + auto warp = details::tiled_partition_internal<32, ThisType>(); + details::barrier_t* sync_location = get_sync_location(); + + // Get warp slot of the source threads warp. + TyVal* warp_scratch_location = get_scratch_location(src_warp); + + if (warp.meta_group_rank() == src_warp) { + warp.sync(); + // Put shuffled value into my warp slot and let my warp arrive at the barrier. + if (thread_rank() == src) { + *warp_scratch_location = val; + } + details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps); + TyVal result = *warp_scratch_location; + details::sync_warps_wait(sync_location, details::cta::thread_rank()); + return result; + } + else { + // Wait for the source warp to arrive on the barrier. 
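+            // (Slot index of the source warp: this thread's CTA-relative warp id,
+            //  minus its rank within this tile, plus src_warp.)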
+ details::sync_warps_wait_for_specific_warp(sync_location, + (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp)); + TyVal result = *warp_scratch_location; + details::sync_warps(sync_location, details::cta::thread_rank(), numWarps); + return result; + } + } + + template + _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const { + static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size, + "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes"); + auto warp = details::tiled_partition_internal<32, ThisType>(); + details::barrier_t* sync_location = get_sync_location(); + TyVal* warp_scratch_location = get_scratch_location(); + + warp_lambda(warp, warp_scratch_location); + + if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) { + auto subwarp = details::tiled_partition_internal(); + if (subwarp.meta_group_rank() == 0) { + TyVal* thread_scratch_location = get_scratch_location(subwarp.thread_rank()); + inter_warp_lambda(subwarp, thread_scratch_location); + } + warp.sync(); + details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps); + } + TyVal result = *warp_scratch_location; + return result; + } + +public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id; + + using __static_size_tile_base::thread_rank; + + template + _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const { + static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size, + "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes"); + return shfl_impl(val, src); + } + + _CG_QUALIFIER void sync() const { + details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps); + } + + _CG_QUALIFIER int any(int predicate) const { + auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) { + *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate); + }; + auto inter_warp_lambda = + [] (details::internal_thread_block_tile& subwarp, int* thread_scratch_location) { + *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location); + }; + return collectives_scheme(warp_lambda, inter_warp_lambda); + } + + _CG_QUALIFIER int all(int predicate) const { + auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) { + *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate); + }; + auto inter_warp_lambda = + [] (details::internal_thread_block_tile& subwarp, int* thread_scratch_location) { + *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location); + }; + return collectives_scheme(warp_lambda, inter_warp_lambda); + } +}; + + +template +class __multi_warp_thread_block_tile : + public __static_size_multi_warp_tile_base, + public __static_parent_thread_block_tile_base +{ + typedef __static_parent_thread_block_tile_base staticParentBaseT; + typedef __static_size_multi_warp_tile_base staticTileBaseT; +protected: + _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) : + __static_size_multi_warp_tile_base(g) {} +}; + +template +class __multi_warp_thread_block_tile : public __static_size_multi_warp_tile_base +{ + const unsigned int metaGroupRank; + const unsigned int metaGroupSize; + +protected: + template + _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile& g) : + 
__static_size_multi_warp_tile_base(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {} + +public: + _CG_QUALIFIER unsigned int meta_group_rank() const { + return metaGroupRank; + } + + _CG_QUALIFIER unsigned int meta_group_size() const { + return metaGroupSize; + } +}; +#endif + +template +class thread_block_tile; + +namespace details { + template + class thread_block_tile_impl; + + template + class thread_block_tile_impl: public __single_warp_thread_block_tile + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl& g) : + __single_warp_thread_block_tile(g.meta_group_rank(), g.meta_group_size()) {} + + _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) : + __single_warp_thread_block_tile() {} + }; + +#if defined(_CG_CPP11_FEATURES) + template + class thread_block_tile_impl : public __multi_warp_thread_block_tile + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) : + __multi_warp_thread_block_tile(g) {} + }; +#else + template + class thread_block_tile_impl + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {} + }; +#endif +} + +template +class thread_block_tile : public details::thread_block_tile_impl::value> +{ + friend _CG_QUALIFIER thread_block_tile<1, void> this_thread(); + +protected: + _CG_QUALIFIER thread_block_tile(const ParentT& g) : + details::thread_block_tile_impl::value>(g) {} + +public: + _CG_QUALIFIER operator thread_block_tile() const { + return thread_block_tile(*this); + } +}; + +template +class thread_block_tile : public details::thread_block_tile_impl::value> +{ + template + friend class thread_block_tile; + +protected: + template + _CG_QUALIFIER thread_block_tile(const thread_block_tile& g) : + details::thread_block_tile_impl::value>(g) {} + +public: + template + _CG_QUALIFIER thread_block_tile(const thread_block_tile& g) : + details::thread_block_tile_impl::value>(g) {} +}; + +namespace details { + template + struct tiled_partition_impl; + + template + struct tiled_partition_impl : public thread_block_tile { + _CG_QUALIFIER tiled_partition_impl(const thread_block& g) : + thread_block_tile(g) {} + }; + + // ParentT = static thread_block_tile specialization + template + struct tiled_partition_impl > : + public thread_block_tile > { +#ifdef _CG_CPP11_FEATURES + static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size"); +#endif + _CG_QUALIFIER tiled_partition_impl(const thread_block_tile& g) : + thread_block_tile >(g) {} + }; + +} + +template +_CG_QUALIFIER thread_block_tile tiled_partition(const ParentT& g) +{ + return details::tiled_partition_impl(g); +} + +/** + * thread_group this_thread() + * + * Constructs a generic thread_group containing only the calling thread + */ +_CG_QUALIFIER thread_block_tile<1, void> this_thread() +{ + // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its + // meta group rank and size set to 0 and 1 respectively. + return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block()); +} + +/** + * .sync() + * + * Executes a barrier across the group + * + * Implements both a compiler fence and an architectural fence to prevent, + * memory reordering around the barrier. 
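+ *
+ * For illustration only (the kernel name `k` is a placeholder):
+ * \code
+ * __global__ void k() {
+ *     cooperative_groups::thread_group g = cooperative_groups::this_thread_block();
+ *     g.sync();                      // dispatches to the underlying group's barrier
+ *     cooperative_groups::sync(g);   // equivalent free-function spelling
+ * }
+ * \endcode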
+ */ +_CG_QUALIFIER void thread_group::sync() const +{ + switch (_data.group.type) { + case details::coalesced_group_id: + cooperative_groups::sync(*static_cast(this)); + break; + case details::thread_block_id: + cooperative_groups::sync(*static_cast(this)); + break; + case details::grid_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#endif + default: + break; + } +} + +/** + * .size() + * + * Returns the total number of threads in the group. + */ +_CG_QUALIFIER unsigned long long thread_group::size() const +{ + unsigned long long size = 0; + switch (_data.group.type) { + case details::coalesced_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; + case details::thread_block_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; + case details::grid_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#endif + default: + break; + } + return size; +} + +/** + * .thread_rank() + * + * Returns the linearized rank of the calling thread along the interval [0, size()). + */ +_CG_QUALIFIER unsigned long long thread_group::thread_rank() const +{ + unsigned long long rank = 0; + switch (_data.group.type) { + case details::coalesced_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; + case details::thread_block_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; + case details::grid_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#endif + default: + break; + } + return rank; +} + +_CG_END_NAMESPACE + +#include +#if (!defined(_MSC_VER) || defined(_WIN64)) +# include +#endif + +# endif /* ! (__cplusplus, __CUDACC__) */ + +#endif /* !_COOPERATIVE_GROUPS_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h new file mode 100644 index 0000000000000000000000000000000000000000..7b167111b0b387a5279da6749d946560e1c42c1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h @@ -0,0 +1,348 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(CU_COMPLEX_H_) +#define CU_COMPLEX_H_ + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#endif +#endif + +/* When trying to include C header file in C++ Code extern "C" is required + * But the Standard QNX headers already have ifdef extern in them when compiling C++ Code + * extern "C" cannot be nested + * Hence keep the header out of extern "C" block + */ + +#if !defined(__CUDACC__) +#include /* import fabsf, sqrt */ +#endif /* !defined(__CUDACC__) */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +#include "vector_types.h" + +typedef float2 cuFloatComplex; + +__host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x) +{ + return x.x; +} + +__host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x) +{ + return x.y; +} + +__host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex + (float r, float i) +{ + cuFloatComplex res; + res.x = r; + res.y = i; + return res; +} + +__host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x) +{ + return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x)); +} +__host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x, + cuFloatComplex y) +{ + return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y), + cuCimagf(x) + cuCimagf(y)); +} + +__host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x, + cuFloatComplex y) +{ + return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y), + cuCimagf(x) - cuCimagf(y)); +} + +/* This implementation could suffer from intermediate overflow even though + * the final result would be in range. However, various implementations do + * not guard against this (presumably to avoid losing performance), so we + * don't do it either to stay competitive. + */ +__host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x, + cuFloatComplex y) +{ + cuFloatComplex prod; + prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) - + (cuCimagf(x) * cuCimagf(y)), + (cuCrealf(x) * cuCimagf(y)) + + (cuCimagf(x) * cuCrealf(y))); + return prod; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Such guarded implementations are usually the default for + * complex library implementations, with some also offering an unguarded, + * faster version. + */ +__host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x, + cuFloatComplex y) +{ + cuFloatComplex quot; + float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y)); + float oos = 1.0f / s; + float ars = cuCrealf(x) * oos; + float ais = cuCimagf(x) * oos; + float brs = cuCrealf(y) * oos; + float bis = cuCimagf(y) * oos; + s = (brs * brs) + (bis * bis); + oos = 1.0f / s; + quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos, + ((ais * brs) - (ars * bis)) * oos); + return quot; +} + +/* + * We would like to call hypotf(), but it's not available on all platforms. + * This discrete implementation guards against intermediate underflow and + * overflow by scaling. Otherwise we would lose half the exponent range. + * There are various ways of doing guarded computation. For now chose the + * simplest and fastest solution, however this may suffer from inaccuracies + * if sqrt and division are not IEEE compliant. 
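+ *
+ * For example (an illustrative value), under this scheme
+ * \code
+ * float m = cuCabsf(make_cuFloatComplex(3.0e30f, 4.0e30f)); // 5.0e30f
+ * \endcode
+ * whereas the naive sqrtf(a*a + b*b) would overflow to infinity, because
+ * a*a alone already exceeds FLT_MAX.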
+ */ +__host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x) +{ + float a = cuCrealf(x); + float b = cuCimagf(x); + float v, w, t; + a = fabsf(a); + b = fabsf(b); + if (a > b) { + v = a; + w = b; + } else { + v = b; + w = a; + } + t = w / v; + t = 1.0f + t * t; + t = v * sqrtf(t); + if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) { + t = v + w; + } + return t; +} + +/* Double precision */ +typedef double2 cuDoubleComplex; + +__host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x) +{ + return x.x; +} + +__host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x) +{ + return x.y; +} + +__host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex + (double r, double i) +{ + cuDoubleComplex res; + res.x = r; + res.y = i; + return res; +} + +__host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x) +{ + return make_cuDoubleComplex (cuCreal(x), -cuCimag(x)); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x, + cuDoubleComplex y) +{ + return make_cuDoubleComplex (cuCreal(x) + cuCreal(y), + cuCimag(x) + cuCimag(y)); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x, + cuDoubleComplex y) +{ + return make_cuDoubleComplex (cuCreal(x) - cuCreal(y), + cuCimag(x) - cuCimag(y)); +} + +/* This implementation could suffer from intermediate overflow even though + * the final result would be in range. However, various implementations do + * not guard against this (presumably to avoid losing performance), so we + * don't do it either to stay competitive. + */ +__host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x, + cuDoubleComplex y) +{ + cuDoubleComplex prod; + prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) - + (cuCimag(x) * cuCimag(y)), + (cuCreal(x) * cuCimag(y)) + + (cuCimag(x) * cuCreal(y))); + return prod; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Such guarded implementations are usually the default for + * complex library implementations, with some also offering an unguarded, + * faster version. + */ +__host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x, + cuDoubleComplex y) +{ + cuDoubleComplex quot; + double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y))); + double oos = 1.0 / s; + double ars = cuCreal(x) * oos; + double ais = cuCimag(x) * oos; + double brs = cuCreal(y) * oos; + double bis = cuCimag(y) * oos; + s = (brs * brs) + (bis * bis); + oos = 1.0 / s; + quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos, + ((ais * brs) - (ars * bis)) * oos); + return quot; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Otherwise we would lose half the exponent range. There are + * various ways of doing guarded computation. For now chose the simplest + * and fastest solution, however this may suffer from inaccuracies if sqrt + * and division are not IEEE compliant. 
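+ *
+ * For example (an illustrative value), under this scheme
+ * \code
+ * double m = cuCabs(make_cuDoubleComplex(1.0e-170, 1.0e-170)); // ~1.414e-170
+ * \endcode
+ * whereas the naive sqrt(a*a + b*b) would return 0.0, because both a*a and
+ * b*b underflow to zero.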
+ */ +__host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x) +{ + double a = cuCreal(x); + double b = cuCimag(x); + double v, w, t; + a = fabs(a); + b = fabs(b); + if (a > b) { + v = a; + w = b; + } else { + v = b; + w = a; + } + t = w / v; + t = 1.0 + t * t; + t = v * sqrt(t); + if ((v == 0.0) || + (v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) { + t = v + w; + } + return t; +} + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +/* aliases */ +typedef cuFloatComplex cuComplex; +__host__ __device__ static __inline__ cuComplex make_cuComplex (float x, + float y) +{ + return make_cuFloatComplex (x, y); +} + +/* float-to-double promotion */ +__host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble + (cuFloatComplex c) +{ + return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c)); +} + +__host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat +(cuDoubleComplex c) +{ + return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c)); +} + + +__host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d) +{ + float real_res; + float imag_res; + + real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d); + imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d); + + real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res; + imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res; + + return make_cuComplex(real_res, imag_res); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d) +{ + double real_res; + double imag_res; + + real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d); + imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d); + + real_res = -(cuCimag(x) * cuCimag(y)) + real_res; + imag_res = (cuCimag(x) * cuCreal(y)) + imag_res; + + return make_cuDoubleComplex(real_res, imag_res); +} + +#endif /* !defined(CU_COMPLEX_H_) */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h new file mode 100644 index 0000000000000000000000000000000000000000..e304ae904c42dc2f05fa403b15ec02c0bb6e376c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h @@ -0,0 +1,659 @@ +/* + * Copyright 2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAEGL_H +#define CUDAEGL_H + +#include "cuda.h" +#include "EGL/egl.h" +#include "EGL/eglext.h" + + +#ifdef CUDA_FORCE_API_VERSION +#error "CUDA_FORCE_API_VERSION is no longer supported." +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \addtogroup CUDA_TYPES + * @{ + */ + +/** + * Maximum number of planes per frame + */ +#define MAX_PLANES 3 + +/** + * CUDA EglFrame type - array or pointer + */ +typedef enum CUeglFrameType_enum { + CU_EGL_FRAME_TYPE_ARRAY = 0, /**< Frame type CUDA array */ + CU_EGL_FRAME_TYPE_PITCH = 1, /**< Frame type pointer */ +} CUeglFrameType; + +/** + * Indicates that timeout for ::cuEGLStreamConsumerAcquireFrame is infinite. + */ +#define CUDA_EGL_INFINITE_TIMEOUT 0xFFFFFFFF + +/** + * Resource location flags- sysmem or vidmem + * + * For CUDA context on iGPU, since video and system memory are equivalent - + * these flags will not have an effect on the execution. + * + * For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags + * to give a hint about the desired location. + * + * ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory + * to be accessed by CUDA. + * + * ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated + * video memory to be accessed by CUDA. + * + * There may be an additional latency due to new allocation and data migration, + * if the frame is produced on a different memory. + + */ +typedef enum CUeglResourceLocationFlags_enum { + CU_EGL_RESOURCE_LOCATION_SYSMEM = 0x00, /**< Resource location sysmem */ + CU_EGL_RESOURCE_LOCATION_VIDMEM = 0x01 /**< Resource location vidmem */ +} CUeglResourceLocationFlags; + +/** + * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. 
+ * Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY + */ +typedef enum CUeglColorFormat_enum { + CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0x00, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = 0x01, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */ + CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 0x02, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 0x03, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */ + CU_EGL_COLOR_FORMAT_RGB = 0x04, /**< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_BGR = 0x05, /**< R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_ARGB = 0x06, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */ + CU_EGL_COLOR_FORMAT_RGBA = 0x07, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */ + CU_EGL_COLOR_FORMAT_L = 0x08, /**< single luminance channel in one surface. */ + CU_EGL_COLOR_FORMAT_R = 0x09, /**< single color channel in one surface. */ + CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 0x0A, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 0x0B, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */ + CU_EGL_COLOR_FORMAT_YUYV_422 = 0x0C, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */ + CU_EGL_COLOR_FORMAT_UYVY_422 = 0x0D, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */ + CU_EGL_COLOR_FORMAT_ABGR = 0x0E, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */ + CU_EGL_COLOR_FORMAT_BGRA = 0x0F, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */ + CU_EGL_COLOR_FORMAT_A = 0x10, /**< Alpha color format - one channel in one surface. */ + CU_EGL_COLOR_FORMAT_RG = 0x11, /**< R/G color format - two channels in one surface with GR byte ordering */ + CU_EGL_COLOR_FORMAT_AYUV = 0x12, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */ + CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 0x13, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 0x14, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 0x15, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 0x16, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 0x17, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 0x18, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. 
*/ + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 0x19, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_VYUY_ER = 0x1A, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */ + CU_EGL_COLOR_FORMAT_UYVY_ER = 0x1B, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */ + CU_EGL_COLOR_FORMAT_YUYV_ER = 0x1C, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */ + CU_EGL_COLOR_FORMAT_YVYU_ER = 0x1D, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */ + CU_EGL_COLOR_FORMAT_YUV_ER = 0x1E, /**< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_YUVA_ER = 0x1F, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */ + CU_EGL_COLOR_FORMAT_AYUV_ER = 0x20, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */ + CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 0x21, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 0x22, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 0x23, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 0x24, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 0x25, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 0x26, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 0x27, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 0x28, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 0x29, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 0x2A, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 0x2B, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 0x2C, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_BAYER_RGGB = 0x2D, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_BGGR = 0x2E, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_GRBG = 0x2F, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_GBRG = 0x30, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. 
*/ + CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 0x31, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 0x32, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 0x33, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 0x34, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 0x35, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 0x36, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 0x37, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 0x38, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 0x39, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 0x3A, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 0x3B, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 0x3C, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 0x3D, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 0x3E, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 0x3F, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 0x40, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 0x41, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 0x42, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 0x43, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 0x44, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. 
*/ + CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 0x45, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 0x46, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 0x47, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_BCCR = 0x48, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_RCCB = 0x49, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_CRBC = 0x4A, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_CBRC = 0x4B, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */ + CU_EGL_COLOR_FORMAT_BAYER10_CCCC = 0x4C, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_BCCR = 0x4D, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_RCCB = 0x4E, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_CRBC = 0x4F, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_CBRC = 0x50, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_CCCC = 0x51, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_Y = 0x52, /**< Color format for single Y plane. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = 0x53, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = 0x54, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = 0x55, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = 0x56, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height += 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = 0x57, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = 0x58, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = 0x59, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height += 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = 0x5A, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = 0x5B, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
*/ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = 0x5C, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = 0x5D, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = 0x5E, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = 0x5F, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y_ER = 0x60, /**< Extended Range Color format for single Y plane. */ + CU_EGL_COLOR_FORMAT_Y_709_ER = 0x61, /**< Extended Range Color format for single Y plane. */ + CU_EGL_COLOR_FORMAT_Y10_ER = 0x62, /**< Extended Range Color format for single Y10 plane. */ + CU_EGL_COLOR_FORMAT_Y10_709_ER = 0x63, /**< Extended Range Color format for single Y10 plane. */ + CU_EGL_COLOR_FORMAT_Y12_ER = 0x64, /**< Extended Range Color format for single Y12 plane. */ + CU_EGL_COLOR_FORMAT_Y12_709_ER = 0x65, /**< Extended Range Color format for single Y12 plane. */ + CU_EGL_COLOR_FORMAT_YUVA = 0x66, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */ + CU_EGL_COLOR_FORMAT_YUV = 0x67, /**< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_YVYU = 0x68, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */ + CU_EGL_COLOR_FORMAT_VYUY = 0x69, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = 0x6A, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = 0x6B, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = 0x6C, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = 0x6D, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = 0x6E, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = 0x6F, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = 0x70, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = 0x71, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_MAX +} CUeglColorFormat; + +/** + * CUDA EGLFrame structure Descriptor - structure defining one frame of EGL. + * + * Each frame may contain one or more planes depending on whether the surface * is Multiplanar or not. 
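+ *
+ * For illustration only, a consumer-side sketch of inspecting a mapped frame
+ * (the `res` handle is assumed to come from ::cuGraphicsEGLRegisterImage;
+ * error checking is omitted):
+ * \code
+ * CUeglFrame f;
+ * cuGraphicsResourceGetMappedEglFrame(&f, res, 0, 0);
+ * for (unsigned int p = 0; p < f.planeCount; ++p) {
+ *     if (f.frameType == CU_EGL_FRAME_TYPE_ARRAY) {
+ *         CUarray plane = f.frame.pArray[p];   // one CUDA array per plane
+ *     } else {
+ *         void *ptr = f.frame.pPitch[p];       // pitched device pointer per plane
+ *     }
+ * }
+ * \endcode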
+ */ +typedef struct CUeglFrame_st { + union { + CUarray pArray[MAX_PLANES]; /**< Array of CUarray corresponding to each plane*/ + void* pPitch[MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/ + } frame; + unsigned int width; /**< Width of first plane */ + unsigned int height; /**< Height of first plane */ + unsigned int depth; /**< Depth of first plane */ + unsigned int pitch; /**< Pitch of first plane */ + unsigned int planeCount; /**< Number of planes */ + unsigned int numChannels; /**< Number of channels for the plane */ + CUeglFrameType frameType; /**< Array or Pitch */ + CUeglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/ + CUarray_format cuFormat; /**< CUDA Array Format*/ +} CUeglFrame_v1; +typedef CUeglFrame_v1 CUeglFrame; + +/** + * CUDA EGLSream Connection + */ +typedef struct CUeglStreamConnection_st* CUeglStreamConnection; + +/** @} */ /* END CUDA_TYPES */ + +/** + * \file cudaEGL.h + * \brief Header file for the EGL interoperability functions of the + * low-level CUDA driver application programming interface. + */ + +/** + * \defgroup CUDA_EGL EGL Interoperability + * \ingroup CUDA_DRIVER + * + * ___MANBRIEF___ EGL interoperability functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the EGL interoperability functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Registers an EGL image + * + * Registers the EGLImageKHR specified by \p image for access by + * CUDA. A handle to the registered object is returned as \p pCudaResource. + * Additional Mapping/Unmapping is not required for the registered resource and + * ::cuGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource. + * + * The application will be responsible for synchronizing access to shared objects. + * The application must ensure that any pending operation which access the objects have completed + * before passing control to CUDA. This may be accomplished by issuing and waiting for + * glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). + * The application will be also responsible for ensuring that any pending operation on the + * registered CUDA resource has completed prior to executing subsequent commands in other APIs + * accesing the same memory objects. + * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably). + * + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer. + * typedef void* EGLImageKHR + * + * \param pCudaResource - Pointer to the returned object handle + * \param image - An EGLImageKHR image which can be used to create target resource. 
+ * \param flags - Map flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuGraphicsEGLRegisterImage, ::cuGraphicsUnregisterResource, + * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + * ::cuGraphicsUnmapResources, + * ::cudaGraphicsEGLRegisterImage + */ +CUresult CUDAAPI cuGraphicsEGLRegisterImage(CUgraphicsResource *pCudaResource, EGLImageKHR image, unsigned int flags); + +/** + * \brief Connect CUDA to EGLStream as a consumer. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param stream - EGLStreamKHR handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerConnect + */ +CUresult CUDAAPI cuEGLStreamConsumerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream); + +/** + * \brief Connect CUDA to EGLStream as a consumer with given flags. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags. + * + * The flags specify whether the consumer wants to access frames from system memory or video memory. + * Default is ::CU_EGL_RESOURCE_LOCATION_VIDMEM. + * + * \param conn - Pointer to the returned connection handle + * \param stream - EGLStreamKHR handle + * \param flags - Flags denote intended location - system or video. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerConnectWithFlags + */ + +CUresult CUDAAPI cuEGLStreamConsumerConnectWithFlags(CUeglStreamConnection *conn, EGLStreamKHR stream, unsigned int flags); + +/** + * \brief Disconnect CUDA as a consumer to EGLStream . + * + * Disconnect CUDA as a consumer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerDisconnect + */ +CUresult CUDAAPI cuEGLStreamConsumerDisconnect(CUeglStreamConnection *conn); + +/** + * \brief Acquire an image frame from the EGLStream with CUDA as a consumer. + * + * Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented + * by the producer unless explicitly disabled by setting EGL_SUPPORT_REUSE_NV flag to EGL_FALSE + * during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE. + * ::cuGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get + * ::CUeglFrame. + * + * \param conn - Connection on which to acquire + * \param pCudaResource - CUDA resource on which the stream frame will be mapped for use. + * \param pStream - CUDA stream for synchronization and any data migrations + * implied by ::CUeglResourceLocationFlags. 
+ * \param timeout - Desired timeout in usec for a new frame to be acquired. + * If set as ::CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely. + * After timeout occurs CUDA consumer tries to acquire an old frame + * if available and EGL_SUPPORT_REUSE_NV flag is set. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerAcquireFrame + */ +CUresult CUDAAPI cuEGLStreamConsumerAcquireFrame(CUeglStreamConnection *conn, + CUgraphicsResource *pCudaResource, CUstream *pStream, unsigned int timeout); +/** + * \brief Releases the last frame acquired from the EGLStream. + * + * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR. + * If EGL_SUPPORT_REUSE_NV flag is set to EGL_TRUE, at the time of EGL creation + * this API doesn't release the last frame acquired on the EGLStream. + * By default, EGLStream is created with this flag set to EGL_TRUE. + * + * \param conn - Connection on which to release + * \param pCudaResource - CUDA resource whose corresponding frame is to be released + * \param pStream - CUDA stream on which release will be done. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerReleaseFrame + */ +CUresult CUDAAPI cuEGLStreamConsumerReleaseFrame(CUeglStreamConnection *conn, + CUgraphicsResource pCudaResource, CUstream *pStream); + +/** + * \brief Connect CUDA to EGLStream as a producer. + * + * Connect CUDA as a producer to EGLStreamKHR specified by \p stream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param stream - EGLStreamKHR handle + * \param width - width of the image to be submitted to the stream + * \param height - height of the image to be submitted to the stream + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + * ::cuEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerConnect + */ +CUresult CUDAAPI cuEGLStreamProducerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream, + EGLint width, EGLint height); + +/** + * \brief Disconnect CUDA as a producer to EGLStream . + * + * Disconnect CUDA as a producer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + * ::cuEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerDisconnect + */ +CUresult CUDAAPI cuEGLStreamProducerDisconnect(CUeglStreamConnection *conn); + +/** + * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer. + * + * When a frame is presented by the producer, it gets associated with the EGLStream + * and thus it is illegal to free the frame before the producer is disconnected. + * If a frame is freed and reused it may lead to undefined behavior. + * + * If producer and consumer are on different GPUs (iGPU and dGPU) then frametype + * ::CU_EGL_FRAME_TYPE_ARRAY is not supported. 
::CU_EGL_FRAME_TYPE_PITCH can be used for
+ * such cross-device applications.
+ *
+ * The ::CUeglFrame is defined as:
+ * \code
+ * typedef struct CUeglFrame_st {
+ * union {
+ * CUarray pArray[MAX_PLANES];
+ * void* pPitch[MAX_PLANES];
+ * } frame;
+ * unsigned int width;
+ * unsigned int height;
+ * unsigned int depth;
+ * unsigned int pitch;
+ * unsigned int planeCount;
+ * unsigned int numChannels;
+ * CUeglFrameType frameType;
+ * CUeglColorFormat eglColorFormat;
+ * CUarray_format cuFormat;
+ * } CUeglFrame;
+ * \endcode
+ *
+ * For ::CUeglFrame of type ::CU_EGL_FRAME_TYPE_PITCH, the application may present a sub-region of a memory
+ * allocation. In that case, the pitched pointer will specify the start address of the sub-region in
+ * the allocation and corresponding ::CUeglFrame fields will specify the dimensions of the sub-region.
+ *
+ * \param conn - Connection on which to present the CUDA array
+ * \param eglframe - CUDA EGLStream Producer Frame handle to be sent to the consumer over EGLStream.
+ * \param pStream - CUDA stream on which to present the frame.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ *
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
+ * ::cuEGLStreamProducerReturnFrame,
+ * ::cudaEGLStreamProducerPresentFrame
+ */
+CUresult CUDAAPI cuEGLStreamProducerPresentFrame(CUeglStreamConnection *conn,
+ CUeglFrame eglframe, CUstream *pStream);
+
+/**
+ * \brief Return the CUDA eglFrame to the EGLStream released by the consumer.
+ *
+ * This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not
+ * returned a frame to the EGL stream. If a timeout is returned, the application can retry.
+ *
+ * \param conn - Connection on which to return
+ * \param eglframe - CUDA EGLStream Producer Frame handle returned from the consumer over EGLStream.
+ * \param pStream - CUDA stream on which to return the frame.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_LAUNCH_TIMEOUT
+ *
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
+ * ::cuEGLStreamProducerPresentFrame,
+ * ::cudaEGLStreamProducerReturnFrame
+ */
+CUresult CUDAAPI cuEGLStreamProducerReturnFrame(CUeglStreamConnection *conn,
+ CUeglFrame *eglframe, CUstream *pStream);
+
+/**
+ * \brief Get an eglFrame through which to access a registered EGL graphics resource.
+ *
+ * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
+ * \p resource may be accessed.
+ * This API can only be called for registered EGL graphics resources.
+ *
+ * The ::CUeglFrame is defined as:
+ * \code
+ * typedef struct CUeglFrame_st {
+ * union {
+ * CUarray pArray[MAX_PLANES];
+ * void* pPitch[MAX_PLANES];
+ * } frame;
+ * unsigned int width;
+ * unsigned int height;
+ * unsigned int depth;
+ * unsigned int pitch;
+ * unsigned int planeCount;
+ * unsigned int numChannels;
+ * CUeglFrameType frameType;
+ * CUeglColorFormat eglColorFormat;
+ * CUarray_format cuFormat;
+ * } CUeglFrame;
+ * \endcode
+ *
+ * If \p resource is not registered then ::CUDA_ERROR_NOT_MAPPED is returned.
+ *
+ * \param eglFrame - Returned eglFrame.
+ * \param resource - Registered resource to access.
+ * \param index - Index for cubemap surfaces.
+ * \param mipLevel - Mipmap level for the subresource to access.
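+ *
+ * As a rough usage sketch (error checking omitted; \p eglImage stands for an
+ * EGLImageKHR the application has already created, and the flag shown is only one
+ * possible choice), the mapped frame might be queried as follows:
+ * \code
+ * CUgraphicsResource res;
+ * CUeglFrame frame;
+ * cuGraphicsEGLRegisterImage(&res, eglImage, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
+ * cuGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0);
+ * // frame.width, frame.height, frame.planeCount and frame.frameType now
+ * // describe the registered image.
+ * cuGraphicsUnregisterResource(res);
+ * \endcode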
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED + * + * \sa + * ::cuGraphicsMapResources, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cuGraphicsResourceGetMappedPointer, + * ::cudaGraphicsResourceGetMappedEglFrame + */ +CUresult CUDAAPI cuGraphicsResourceGetMappedEglFrame(CUeglFrame* eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel); + +/** + * \brief Creates an event from EGLSync object + * + * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified + * via \p flags. Valid flags include: + * - ::CU_EVENT_DEFAULT: Default event creation flag. + * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking + * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on + * an event created with this flag will block until the event has actually + * been completed. + * + * Once the \p eglSync gets destroyed, ::cuEventDestroy is the only API + * that can be invoked on the event. + * + * ::cuEventRecord and TimingData are not supported for events created from EGLSync. + * + * The EGLSyncKHR is an opaque handle to an EGL sync object. + * typedef void* EGLSyncKHR + * + * \param phEvent - Returns newly created event + * \param eglSync - Opaque handle to EGLSync object + * \param flags - Event creation flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventDestroy + */ +CUresult CUDAAPI cuEventCreateFromEGLSync(CUevent *phEvent, EGLSyncKHR eglSync, unsigned int flags); + +/** @} */ /* END CUDA_EGL */ + +#ifdef __cplusplus +}; +#endif + +#endif + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..61b82337dc4bb280869934b11c2105db62ae20c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h @@ -0,0 +1,96 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAEGLTYPEDEFS_H +#define CUDAEGLTYPEDEFS_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaEGL.h + */ +#define PFN_cuGraphicsEGLRegisterImage PFN_cuGraphicsEGLRegisterImage_v7000 +#define PFN_cuEGLStreamConsumerConnect PFN_cuEGLStreamConsumerConnect_v7000 +#define PFN_cuEGLStreamConsumerConnectWithFlags PFN_cuEGLStreamConsumerConnectWithFlags_v8000 +#define PFN_cuEGLStreamConsumerDisconnect PFN_cuEGLStreamConsumerDisconnect_v7000 +#define PFN_cuEGLStreamConsumerAcquireFrame PFN_cuEGLStreamConsumerAcquireFrame_v7000 +#define PFN_cuEGLStreamConsumerReleaseFrame PFN_cuEGLStreamConsumerReleaseFrame_v7000 +#define PFN_cuEGLStreamProducerConnect PFN_cuEGLStreamProducerConnect_v7000 +#define PFN_cuEGLStreamProducerDisconnect PFN_cuEGLStreamProducerDisconnect_v7000 +#define PFN_cuEGLStreamProducerPresentFrame PFN_cuEGLStreamProducerPresentFrame_v7000 +#define PFN_cuEGLStreamProducerReturnFrame PFN_cuEGLStreamProducerReturnFrame_v7000 +#define PFN_cuGraphicsResourceGetMappedEglFrame PFN_cuGraphicsResourceGetMappedEglFrame_v7000 +#define PFN_cuEventCreateFromEGLSync PFN_cuEventCreateFromEGLSync_v9000 + + +/** + * Type definitions for functions defined in cudaEGL.h + */ +typedef CUresult (CUDAAPI *PFN_cuGraphicsEGLRegisterImage_v7000)(CUgraphicsResource CUDAAPI *pCudaResource, EGLImageKHR image, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnectWithFlags_v8000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerAcquireFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource CUDAAPI *pCudaResource, CUstream CUDAAPI *pStream, unsigned int timeout); +typedef CUresult (CUDAAPI 
*PFN_cuEGLStreamConsumerReleaseFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource pCudaResource, CUstream CUDAAPI *pStream); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, EGLint width, EGLint height); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerPresentFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 eglframe, CUstream CUDAAPI *pStream); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerReturnFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 CUDAAPI *eglframe, CUstream CUDAAPI *pStream); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedEglFrame_v7000)(CUeglFrame_v1 CUDAAPI *eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel); +typedef CUresult (CUDAAPI *PFN_cuEventCreateFromEGLSync_v9000)(CUevent CUDAAPI *phEvent, EGLSyncKHR eglSync, unsigned int flags); + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h new file mode 100644 index 0000000000000000000000000000000000000000..1a9c70e881774c8f3cf8b6430e7aa53a98d74669 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h @@ -0,0 +1,608 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAGL_H +#define CUDAGL_H + +#include +#include + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif + +#ifdef CUDA_FORCE_API_VERSION +#error "CUDA_FORCE_API_VERSION is no longer supported." +#endif + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __CUDA_API_PER_THREAD_DEFAULT_STREAM + #define __CUDA_API_PTDS(api) api ## _ptds + #define __CUDA_API_PTSZ(api) api ## _ptsz +#else + #define __CUDA_API_PTDS(api) api + #define __CUDA_API_PTSZ(api) api +#endif + +#define cuGLCtxCreate cuGLCtxCreate_v2 +#define cuGLMapBufferObject __CUDA_API_PTDS(cuGLMapBufferObject_v2) +#define cuGLMapBufferObjectAsync __CUDA_API_PTSZ(cuGLMapBufferObjectAsync_v2) +#define cuGLGetDevices cuGLGetDevices_v2 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \file cudaGL.h + * \brief Header file for the OpenGL interoperability functions of the + * low-level CUDA driver application programming interface. + */ + +/** + * \defgroup CUDA_GL OpenGL Interoperability + * \ingroup CUDA_DRIVER + * + * ___MANBRIEF___ OpenGL interoperability functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the OpenGL interoperability functions of the + * low-level CUDA driver application programming interface. Note that mapping + * of OpenGL resources is performed with the graphics API agnostic, resource + * mapping interface described in \ref CUDA_GRAPHICS "Graphics Interoperability". + * + * @{ + */ + +#if defined(_WIN32) +#if !defined(WGL_NV_gpu_affinity) +typedef void* HGPUNV; +#endif +#endif /* _WIN32 */ + +/** + * \brief Registers an OpenGL buffer object + * + * Registers the buffer object specified by \p buffer for access by + * CUDA. A handle to the registered object is returned as \p + * pCudaResource. The register flags \p Flags specify the intended usage, + * as follows: + * + * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. 
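+ *
+ * As a rough usage sketch (error checking omitted; \p vbo stands for an OpenGL
+ * buffer object the application has already created and filled), a registered
+ * buffer might be accessed from CUDA as follows:
+ * \code
+ * CUgraphicsResource res;
+ * CUdeviceptr dptr;
+ * size_t size;
+ * cuGraphicsGLRegisterBuffer(&res, vbo, CU_GRAPHICS_REGISTER_FLAGS_NONE);
+ * cuGraphicsMapResources(1, &res, 0);
+ * cuGraphicsResourceGetMappedPointer(&dptr, &size, res);
+ * // ... launch kernels that read or write dptr ...
+ * cuGraphicsUnmapResources(1, &res, 0);
+ * cuGraphicsUnregisterResource(res);
+ * \endcode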
+ * + * \param pCudaResource - Pointer to the returned object handle + * \param buffer - name of buffer object to be registered + * \param Flags - Register flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \sa + * ::cuGraphicsUnregisterResource, + * ::cuGraphicsMapResources, + * ::cuGraphicsResourceGetMappedPointer, + * ::cudaGraphicsGLRegisterBuffer + */ +CUresult CUDAAPI cuGraphicsGLRegisterBuffer(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags); + +/** + * \brief Register an OpenGL texture or renderbuffer object + * + * Registers the texture or renderbuffer object specified by \p image for access by CUDA. + * A handle to the registered object is returned as \p pCudaResource. + * + * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D, + * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY, + * or ::GL_RENDERBUFFER. + * + * The register flags \p Flags specify the intended usage, as follows: + * + * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * - ::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will + * bind this resource to a surface reference. + * - ::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform + * texture gather operations on this resource. + * + * The following image formats are supported. For brevity's sake, the list is abbreviated. + * For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats + * {GL_R8, GL_R16, GL_RG8, GL_RG16} : + * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY + * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I} + * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X + * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT} + * + * The following image classes are currently disallowed: + * - Textures with borders + * - Multisampled renderbuffers + * + * \param pCudaResource - Pointer to the returned object handle + * \param image - name of texture or renderbuffer object to be registered + * \param target - Identifies the type of object specified by \p image + * \param Flags - Register flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \sa + * ::cuGraphicsUnregisterResource, + * ::cuGraphicsMapResources, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsGLRegisterImage + */ +CUresult CUDAAPI cuGraphicsGLRegisterImage(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags); + +#ifdef _WIN32 +/** + * \brief Gets the CUDA device associated with hGpu + * + * Returns in \p *pDevice the CUDA device associated with a \p hGpu, if + * applicable. 
+ * + * \param pDevice - Device associated with hGpu + * \param hGpu - Handle to a GPU, as queried via ::WGL_NV_gpu_affinity() + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGLMapBufferObject, + * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + * ::cuGLUnregisterBufferObject, ::cuGLUnmapBufferObjectAsync, + * ::cuGLSetBufferObjectMapFlags, + * ::cudaWGLGetDevice + */ +CUresult CUDAAPI cuWGLGetDevice(CUdevice *pDevice, HGPUNV hGpu); +#endif /* _WIN32 */ + +/** + * CUDA devices corresponding to an OpenGL device + */ +typedef enum CUGLDeviceList_enum { + CU_GL_DEVICE_LIST_ALL = 0x01, /**< The CUDA devices for all GPUs used by the current OpenGL context */ + CU_GL_DEVICE_LIST_CURRENT_FRAME = 0x02, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */ + CU_GL_DEVICE_LIST_NEXT_FRAME = 0x03, /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */ +} CUGLDeviceList; + +/** + * \brief Gets the CUDA devices associated with the current OpenGL context + * + * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices + * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices + * at most cudaDeviceCount of the CUDA-compatible devices corresponding to + * the current OpenGL context. If any of the GPUs being used by the current OpenGL + * context are not CUDA capable then the call will return CUDA_ERROR_NO_DEVICE. + * + * The \p deviceList argument may be any of the following: + * - ::CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context. + * - ::CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to + * render the current frame (in SLI). + * - ::CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to + * render the next frame (in SLI). Note that this is a prediction, it can't be guaranteed that + * this is correct in all cases. + * + * \param pCudaDeviceCount - Returned number of CUDA devices. + * \param pCudaDevices - Returned CUDA devices. + * \param cudaDeviceCount - The size of the output device array pCudaDevices. + * \param deviceList - The set of devices to return. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NO_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_GRAPHICS_CONTEXT, + * ::CUDA_ERROR_OPERATING_SYSTEM + * + * \notefnerr + * + * \sa + * ::cuWGLGetDevice, + * ::cudaGLGetDevices + */ +CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); + +/** + * \defgroup CUDA_GL_DEPRECATED OpenGL Interoperability [DEPRECATED] + * + * ___MANBRIEF___ deprecated OpenGL interoperability functions of the low-level + * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes deprecated OpenGL interoperability functionality. + * + * @{ + */ + +/** Flags to map or unmap a resource */ +typedef enum CUGLmap_flags_enum { + CU_GL_MAP_RESOURCE_FLAGS_NONE = 0x00, + CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01, + CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02, +} CUGLmap_flags; + +/** + * \brief Create a CUDA context for interoperability with OpenGL + * + * \deprecated This function is deprecated as of Cuda 5.0. + * + * This function is deprecated and should no longer be used. 
It is + * no longer necessary to associate a CUDA context with an OpenGL + * context in order to achieve maximum interoperability performance. + * + * \param pCtx - Returned CUDA context + * \param Flags - Options for CUDA context creation + * \param device - Device on which to create the context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuCtxCreate, ::cuGLInit, ::cuGLMapBufferObject, + * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync, + * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags, + * ::cuWGLGetDevice + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device ); + +/** + * \brief Initializes OpenGL interoperability + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Initializes OpenGL interoperability. This function is deprecated + * and calling it is no longer required. It may fail if the needed + * OpenGL driver facilities are not available. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuGLMapBufferObject, + * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync, + * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags, + * ::cuWGLGetDevice + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLInit(void); + +/** + * \brief Registers an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Registers the buffer object specified by \p buffer for access by + * CUDA. This function must be called before CUDA can map the buffer + * object. There must be a valid OpenGL context bound to the current + * thread when this function is called, and the buffer name is + * resolved by that context. + * + * \param buffer - The name of the buffer object to register. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_ALREADY_MAPPED + * \notefnerr + * + * \sa ::cuGraphicsGLRegisterBuffer + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLRegisterBufferObject(GLuint buffer); + +/** + * \brief Maps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Maps the buffer object specified by \p buffer into the address space of the + * current CUDA context and returns in \p *dptr and \p *size the base pointer + * and size of the resulting mapping. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * All streams in the current CUDA context are synchronized with the + * current GL context. 
+ * + * \param dptr - Returned mapped base pointer + * \param size - Returned size of mapping + * \param buffer - The name of the buffer object to map + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_MAP_FAILED + * \notefnerr + * + * \sa ::cuGraphicsMapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr *dptr, size_t *size, GLuint buffer); + +/** + * \brief Unmaps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Unmaps the buffer object specified by \p buffer for access by CUDA. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * All streams in the current CUDA context are synchronized with the + * current GL context. + * + * \param buffer - Buffer object to unmap + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGraphicsUnmapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObject(GLuint buffer); + +/** + * \brief Unregister an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Unregisters the buffer object specified by \p buffer. This + * releases any resources associated with the registered buffer. + * After this call, the buffer may no longer be mapped for access by + * CUDA. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * \param buffer - Name of the buffer object to unregister + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGraphicsUnregisterResource + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLUnregisterBufferObject(GLuint buffer); + +/** + * \brief Set the map flags for an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Sets the map flags for the buffer object specified by \p buffer. + * + * Changes to \p Flags will take effect the next time \p buffer is mapped. + * The \p Flags argument may be any of the following: + * - ::CU_GL_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA kernels. This is the default value. + * - ::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA kernels which + * access this resource will not write to this resource. + * - ::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA kernels + * which access this resource will not read from this resource and will + * write over the entire contents of the resource, so none of the data + * previously stored in the resource will be preserved. + * + * If \p buffer has not been registered for use with CUDA, then + * ::CUDA_ERROR_INVALID_HANDLE is returned. If \p buffer is presently + * mapped for access by CUDA, then ::CUDA_ERROR_ALREADY_MAPPED is returned. 
+ * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * \param buffer - Buffer object to unmap + * \param Flags - Map flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * \notefnerr + * + * \sa ::cuGraphicsResourceSetMapFlags + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLSetBufferObjectMapFlags(GLuint buffer, unsigned int Flags); + +/** + * \brief Maps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Maps the buffer object specified by \p buffer into the address space of the + * current CUDA context and returns in \p *dptr and \p *size the base pointer + * and size of the resulting mapping. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * Stream \p hStream in the current CUDA context is synchronized with + * the current GL context. + * + * \param dptr - Returned mapped base pointer + * \param size - Returned size of mapping + * \param buffer - The name of the buffer object to map + * \param hStream - Stream to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_MAP_FAILED + * \notefnerr + * + * \sa ::cuGraphicsMapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream); + +/** + * \brief Unmaps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Unmaps the buffer object specified by \p buffer for access by CUDA. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * Stream \p hStream in the current CUDA context is synchronized with + * the current GL context. 
+ * + * \param buffer - Name of the buffer object to unmap + * \param hStream - Stream to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGraphicsUnmapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObjectAsync(GLuint buffer, CUstream hStream); + +/** @} */ /* END CUDA_GL_DEPRECATED */ +/** @} */ /* END CUDA_GL */ + + +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cuGLCtxCreate + #undef cuGLMapBufferObject + #undef cuGLMapBufferObjectAsync + #undef cuGLGetDevices + + CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); + CUresult CUDAAPI cuGLMapBufferObject_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer); + CUresult CUDAAPI cuGLMapBufferObjectAsync_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream); + CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device ); + CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer); + CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream); +#endif /* __CUDA_API_VERSION_INTERNAL */ + +#ifdef __cplusplus +}; +#endif + +#undef __CUDA_DEPRECATED + +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..81f0d5349e435159647af9af379d1e8e8441221c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h @@ -0,0 +1,123 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. 
+ * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAGLTYPEDEFS_H +#define CUDAGLTYPEDEFS_H + +// Dependent includes for cudagl.h +#include + +#include + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz +#else + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version +#endif + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaGL.h + */ +#define PFN_cuGraphicsGLRegisterBuffer PFN_cuGraphicsGLRegisterBuffer_v3000 +#define PFN_cuGraphicsGLRegisterImage PFN_cuGraphicsGLRegisterImage_v3000 +#define PFN_cuWGLGetDevice PFN_cuWGLGetDevice_v2020 +#define PFN_cuGLGetDevices PFN_cuGLGetDevices_v6050 +#define PFN_cuGLCtxCreate PFN_cuGLCtxCreate_v3020 +#define PFN_cuGLInit PFN_cuGLInit_v2000 +#define PFN_cuGLRegisterBufferObject PFN_cuGLRegisterBufferObject_v2000 +#define PFN_cuGLMapBufferObject __API_TYPEDEF_PTDS(PFN_cuGLMapBufferObject, 3020, 7000) +#define PFN_cuGLUnmapBufferObject PFN_cuGLUnmapBufferObject_v2000 +#define PFN_cuGLUnregisterBufferObject PFN_cuGLUnregisterBufferObject_v2000 +#define PFN_cuGLSetBufferObjectMapFlags PFN_cuGLSetBufferObjectMapFlags_v2030 +#define PFN_cuGLMapBufferObjectAsync __API_TYPEDEF_PTSZ(PFN_cuGLMapBufferObjectAsync, 3020, 7000) +#define PFN_cuGLUnmapBufferObjectAsync PFN_cuGLUnmapBufferObjectAsync_v2030 + + +/** + * Type definitions for functions defined in cudaGL.h + */ +typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterBuffer_v3000)(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterImage_v3000)(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags); +#ifdef _WIN32 +typedef CUresult (CUDAAPI *PFN_cuWGLGetDevice_v2020)(CUdevice_v1 *pDevice, HGPUNV hGpu); +#endif +typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v6050)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); +typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v3020)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device); +typedef CUresult (CUDAAPI *PFN_cuGLInit_v2000)(void); +typedef CUresult (CUDAAPI *PFN_cuGLRegisterBufferObject_v2000)(GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v7000_ptds)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObject_v2000)(GLuint buffer); +typedef CUresult (CUDAAPI 
*PFN_cuGLUnregisterBufferObject_v2000)(GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLSetBufferObjectMapFlags_v2030)(GLuint buffer, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v7000_ptsz)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObjectAsync_v2030)(GLuint buffer, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream); + +/* + * Type definitions for older versioned functions in cuda.h + */ +#if defined(__CUDA_API_VERSION_INTERNAL) +typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v4010)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v2000)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v2030)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v2000)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device); +#endif + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..bea7df4573aff2fa5b0d0029ce9d40a7ebe2de46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h @@ -0,0 +1,78 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. 
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAPROFILERTYPEDEFS_H +#define CUDAPROFILERTYPEDEFS_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaProfiler.h + */ +#define PFN_cuProfilerInitialize PFN_cuProfilerInitialize_v4000 +#define PFN_cuProfilerStart PFN_cuProfilerStart_v4000 +#define PFN_cuProfilerStop PFN_cuProfilerStop_v4000 + + +/** + * Type definitions for functions defined in cudaProfiler.h + */ +typedef CUresult (CUDAAPI *PFN_cuProfilerInitialize_v4000)(const char *configFile, const char *outputFile, CUoutput_mode outputMode); +typedef CUresult (CUDAAPI *PFN_cuProfilerStart_v4000)(void); +typedef CUresult (CUDAAPI *PFN_cuProfilerStop_v4000)(void); + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..c3c7117a9044d428f0ced8ade419bcf520c72732 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h @@ -0,0 +1,1029 @@ +/* + * Copyright 2020-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDATYPEDEFS_H +#define CUDATYPEDEFS_H + +#include + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz +#else + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version +#endif + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cuda.h + */ +#define PFN_cuGetErrorString PFN_cuGetErrorString_v6000 +#define PFN_cuGetErrorName PFN_cuGetErrorName_v6000 +#define PFN_cuInit PFN_cuInit_v2000 +#define PFN_cuDriverGetVersion PFN_cuDriverGetVersion_v2020 +#define PFN_cuDeviceGet PFN_cuDeviceGet_v2000 +#define PFN_cuDeviceGetCount PFN_cuDeviceGetCount_v2000 +#define PFN_cuDeviceGetName PFN_cuDeviceGetName_v2000 +#define PFN_cuDeviceGetUuid PFN_cuDeviceGetUuid_v11040 +#define PFN_cuDeviceGetLuid PFN_cuDeviceGetLuid_v10000 +#define PFN_cuDeviceTotalMem PFN_cuDeviceTotalMem_v3020 +#define PFN_cuDeviceGetTexture1DLinearMaxWidth PFN_cuDeviceGetTexture1DLinearMaxWidth_v11010 +#define PFN_cuDeviceGetAttribute PFN_cuDeviceGetAttribute_v2000 +#define PFN_cuDeviceGetNvSciSyncAttributes PFN_cuDeviceGetNvSciSyncAttributes_v10020 +#define PFN_cuDeviceSetMemPool PFN_cuDeviceSetMemPool_v11020 +#define PFN_cuDeviceGetMemPool PFN_cuDeviceGetMemPool_v11020 +#define PFN_cuDeviceGetDefaultMemPool PFN_cuDeviceGetDefaultMemPool_v11020 +#define PFN_cuDeviceGetProperties PFN_cuDeviceGetProperties_v2000 +#define PFN_cuDeviceComputeCapability PFN_cuDeviceComputeCapability_v2000 +#define PFN_cuDevicePrimaryCtxRetain PFN_cuDevicePrimaryCtxRetain_v7000 +#define PFN_cuDevicePrimaryCtxRelease PFN_cuDevicePrimaryCtxRelease_v11000 +#define PFN_cuDevicePrimaryCtxSetFlags PFN_cuDevicePrimaryCtxSetFlags_v11000 +#define PFN_cuDevicePrimaryCtxGetState PFN_cuDevicePrimaryCtxGetState_v7000 +#define PFN_cuDevicePrimaryCtxReset PFN_cuDevicePrimaryCtxReset_v11000 +#define PFN_cuDeviceGetExecAffinitySupport PFN_cuDeviceGetExecAffinitySupport_v11040 +#define PFN_cuCtxCreate PFN_cuCtxCreate_v11040 
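+
+/*
+ * Each PFN_* macro in this list resolves to the typedef for the newest version of the
+ * corresponding driver entry point, and the PFN_*_v* typedefs declared by this header
+ * give the matching function-pointer signatures. A minimal sketch of fetching one entry
+ * point at run time on Linux (assuming libcuda.so.1 is installed and <dlfcn.h> is
+ * available; ::cuGetProcAddress is the driver's own alternative):
+ *
+ *   void *lib = dlopen("libcuda.so.1", RTLD_NOW);
+ *   PFN_cuCtxGetDevice pfnCtxGetDevice =
+ *       (PFN_cuCtxGetDevice)dlsym(lib, "cuCtxGetDevice");
+ *   // If non-NULL, pfnCtxGetDevice can now be called like cuCtxGetDevice().
+ */
+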
+#define PFN_cuCtxGetId PFN_cuCtxGetId_v12000 +#define PFN_cuCtxDestroy PFN_cuCtxDestroy_v4000 +#define PFN_cuCtxPushCurrent PFN_cuCtxPushCurrent_v4000 +#define PFN_cuCtxPopCurrent PFN_cuCtxPopCurrent_v4000 +#define PFN_cuCtxSetCurrent PFN_cuCtxSetCurrent_v4000 +#define PFN_cuCtxGetCurrent PFN_cuCtxGetCurrent_v4000 +#define PFN_cuCtxGetDevice PFN_cuCtxGetDevice_v2000 +#define PFN_cuCtxGetFlags PFN_cuCtxGetFlags_v7000 +#define PFN_cuCtxSetFlags PFN_cuCtxSetFlags_v12010 +#define PFN_cuCtxSynchronize PFN_cuCtxSynchronize_v2000 +#define PFN_cuCtxSetLimit PFN_cuCtxSetLimit_v3010 +#define PFN_cuCtxGetLimit PFN_cuCtxGetLimit_v3010 +#define PFN_cuCtxGetCacheConfig PFN_cuCtxGetCacheConfig_v3020 +#define PFN_cuCtxSetCacheConfig PFN_cuCtxSetCacheConfig_v3020 +#define PFN_cuCtxGetSharedMemConfig PFN_cuCtxGetSharedMemConfig_v4020 +#define PFN_cuCtxSetSharedMemConfig PFN_cuCtxSetSharedMemConfig_v4020 +#define PFN_cuCtxGetApiVersion PFN_cuCtxGetApiVersion_v3020 +#define PFN_cuCtxGetStreamPriorityRange PFN_cuCtxGetStreamPriorityRange_v5050 +#define PFN_cuCtxResetPersistingL2Cache PFN_cuCtxResetPersistingL2Cache_v11000 +#define PFN_cuCtxAttach PFN_cuCtxAttach_v2000 +#define PFN_cuCtxDetach PFN_cuCtxDetach_v2000 +#define PFN_cuCtxGetExecAffinity PFN_cuCtxGetExecAffinity_v11040 +#define PFN_cuModuleLoad PFN_cuModuleLoad_v2000 +#define PFN_cuModuleLoadData PFN_cuModuleLoadData_v2000 +#define PFN_cuModuleLoadDataEx PFN_cuModuleLoadDataEx_v2010 +#define PFN_cuModuleLoadFatBinary PFN_cuModuleLoadFatBinary_v2000 +#define PFN_cuModuleUnload PFN_cuModuleUnload_v2000 +#define PFN_cuModuleGetFunction PFN_cuModuleGetFunction_v2000 +#define PFN_cuModuleGetGlobal PFN_cuModuleGetGlobal_v3020 +#define PFN_cuModuleGetTexRef PFN_cuModuleGetTexRef_v2000 +#define PFN_cuModuleGetSurfRef PFN_cuModuleGetSurfRef_v3000 +#define PFN_cuLinkCreate PFN_cuLinkCreate_v6050 +#define PFN_cuLinkAddData PFN_cuLinkAddData_v6050 +#define PFN_cuLinkAddFile PFN_cuLinkAddFile_v6050 +#define PFN_cuLinkComplete PFN_cuLinkComplete_v5050 +#define PFN_cuLinkDestroy PFN_cuLinkDestroy_v5050 +#define PFN_cuMemGetInfo PFN_cuMemGetInfo_v3020 +#define PFN_cuMemAlloc PFN_cuMemAlloc_v3020 +#define PFN_cuMemAllocPitch PFN_cuMemAllocPitch_v3020 +#define PFN_cuMemFree PFN_cuMemFree_v3020 +#define PFN_cuMemGetAddressRange PFN_cuMemGetAddressRange_v3020 +#define PFN_cuMemAllocHost PFN_cuMemAllocHost_v3020 +#define PFN_cuMemFreeHost PFN_cuMemFreeHost_v2000 +#define PFN_cuMemHostAlloc PFN_cuMemHostAlloc_v2020 +#define PFN_cuMemHostGetDevicePointer PFN_cuMemHostGetDevicePointer_v3020 +#define PFN_cuMemHostGetFlags PFN_cuMemHostGetFlags_v2030 +#define PFN_cuMemAllocManaged PFN_cuMemAllocManaged_v6000 +#define PFN_cuDeviceGetByPCIBusId PFN_cuDeviceGetByPCIBusId_v4010 +#define PFN_cuDeviceGetPCIBusId PFN_cuDeviceGetPCIBusId_v4010 +#define PFN_cuIpcGetEventHandle PFN_cuIpcGetEventHandle_v4010 +#define PFN_cuIpcOpenEventHandle PFN_cuIpcOpenEventHandle_v4010 +#define PFN_cuIpcGetMemHandle PFN_cuIpcGetMemHandle_v4010 +#define PFN_cuIpcOpenMemHandle PFN_cuIpcOpenMemHandle_v11000 +#define PFN_cuIpcCloseMemHandle PFN_cuIpcCloseMemHandle_v4010 +#define PFN_cuMemHostRegister PFN_cuMemHostRegister_v6050 +#define PFN_cuMemHostUnregister PFN_cuMemHostUnregister_v4000 +#define PFN_cuMemcpy __API_TYPEDEF_PTDS(PFN_cuMemcpy, 4000, 7000) +#define PFN_cuMemcpyPeer __API_TYPEDEF_PTDS(PFN_cuMemcpyPeer, 4000, 7000) +#define PFN_cuMemcpyHtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyHtoD, 3020, 7000) +#define PFN_cuMemcpyDtoH __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoH, 3020, 7000) +#define PFN_cuMemcpyDtoD 
__API_TYPEDEF_PTDS(PFN_cuMemcpyDtoD, 3020, 7000) +#define PFN_cuMemcpyDtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoA, 3020, 7000) +#define PFN_cuMemcpyAtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoD, 3020, 7000) +#define PFN_cuMemcpyHtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyHtoA, 3020, 7000) +#define PFN_cuMemcpyAtoH __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoH, 3020, 7000) +#define PFN_cuMemcpyAtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoA, 3020, 7000) +#define PFN_cuMemcpy2D __API_TYPEDEF_PTDS(PFN_cuMemcpy2D, 3020, 7000) +#define PFN_cuMemcpy2DUnaligned __API_TYPEDEF_PTDS(PFN_cuMemcpy2DUnaligned, 3020, 7000) +#define PFN_cuMemcpy3D __API_TYPEDEF_PTDS(PFN_cuMemcpy3D, 3020, 7000) +#define PFN_cuMemcpy3DPeer __API_TYPEDEF_PTDS(PFN_cuMemcpy3DPeer, 4000, 7000) +#define PFN_cuMemcpyAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyAsync, 4000, 7000) +#define PFN_cuMemcpyPeerAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyPeerAsync, 4000, 7000) +#define PFN_cuMemcpyHtoDAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyHtoDAsync, 3020, 7000) +#define PFN_cuMemcpyDtoHAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyDtoHAsync, 3020, 7000) +#define PFN_cuMemcpyDtoDAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyDtoDAsync, 3020, 7000) +#define PFN_cuMemcpyHtoAAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyHtoAAsync, 3020, 7000) +#define PFN_cuMemcpyAtoHAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyAtoHAsync, 3020, 7000) +#define PFN_cuMemcpy2DAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy2DAsync, 3020, 7000) +#define PFN_cuMemcpy3DAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy3DAsync, 3020, 7000) +#define PFN_cuMemcpy3DPeerAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy3DPeerAsync, 4000, 7000) +#define PFN_cuMemsetD8 __API_TYPEDEF_PTDS(PFN_cuMemsetD8, 3020, 7000) +#define PFN_cuMemsetD16 __API_TYPEDEF_PTDS(PFN_cuMemsetD16, 3020, 7000) +#define PFN_cuMemsetD32 __API_TYPEDEF_PTDS(PFN_cuMemsetD32, 3020, 7000) +#define PFN_cuMemsetD2D8 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D8, 3020, 7000) +#define PFN_cuMemsetD2D16 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D16, 3020, 7000) +#define PFN_cuMemsetD2D32 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D32, 3020, 7000) +#define PFN_cuMemsetD8Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD8Async, 3020, 7000) +#define PFN_cuMemsetD16Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD16Async, 3020, 7000) +#define PFN_cuMemsetD32Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD32Async, 3020, 7000) +#define PFN_cuMemsetD2D8Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D8Async, 3020, 7000) +#define PFN_cuMemsetD2D16Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D16Async, 3020, 7000) +#define PFN_cuMemsetD2D32Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D32Async, 3020, 7000) +#define PFN_cuArrayCreate PFN_cuArrayCreate_v3020 +#define PFN_cuArrayGetDescriptor PFN_cuArrayGetDescriptor_v3020 +#define PFN_cuArrayGetSparseProperties PFN_cuArrayGetSparseProperties_v11010 +#define PFN_cuMipmappedArrayGetSparseProperties PFN_cuMipmappedArrayGetSparseProperties_v11010 +#define PFN_cuArrayGetMemoryRequirements PFN_cuArrayGetMemoryRequirements_v11060 +#define PFN_cuMipmappedArrayGetMemoryRequirements PFN_cuMipmappedArrayGetMemoryRequirements_v11060 +#define PFN_cuArrayGetPlane PFN_cuArrayGetPlane_v11020 +#define PFN_cuArrayDestroy PFN_cuArrayDestroy_v2000 +#define PFN_cuArray3DCreate PFN_cuArray3DCreate_v3020 +#define PFN_cuArray3DGetDescriptor PFN_cuArray3DGetDescriptor_v3020 +#define PFN_cuMipmappedArrayCreate PFN_cuMipmappedArrayCreate_v5000 +#define PFN_cuMipmappedArrayGetLevel PFN_cuMipmappedArrayGetLevel_v5000 +#define PFN_cuMipmappedArrayDestroy PFN_cuMipmappedArrayDestroy_v5000 +#define PFN_cuMemAddressReserve PFN_cuMemAddressReserve_v10020 +#define PFN_cuMemAddressFree 
PFN_cuMemAddressFree_v10020 +#define PFN_cuMemCreate PFN_cuMemCreate_v10020 +#define PFN_cuMemRelease PFN_cuMemRelease_v10020 +#define PFN_cuMemMap PFN_cuMemMap_v10020 +#define PFN_cuMemMapArrayAsync __API_TYPEDEF_PTSZ(PFN_cuMemMapArrayAsync, 11010, 11010) +#define PFN_cuMemUnmap PFN_cuMemUnmap_v10020 +#define PFN_cuMemSetAccess PFN_cuMemSetAccess_v10020 +#define PFN_cuMemGetAccess PFN_cuMemGetAccess_v10020 +#define PFN_cuMemExportToShareableHandle PFN_cuMemExportToShareableHandle_v10020 +#define PFN_cuMemImportFromShareableHandle PFN_cuMemImportFromShareableHandle_v10020 +#define PFN_cuMemGetAllocationGranularity PFN_cuMemGetAllocationGranularity_v10020 +#define PFN_cuMemGetAllocationPropertiesFromHandle PFN_cuMemGetAllocationPropertiesFromHandle_v10020 +#define PFN_cuMemRetainAllocationHandle PFN_cuMemRetainAllocationHandle_v11000 +#define PFN_cuMemFreeAsync __API_TYPEDEF_PTSZ(PFN_cuMemFreeAsync, 11020, 11020) +#define PFN_cuMemAllocAsync __API_TYPEDEF_PTSZ(PFN_cuMemAllocAsync, 11020, 11020) +#define PFN_cuMemPoolTrimTo PFN_cuMemPoolTrimTo_v11020 +#define PFN_cuMemPoolSetAttribute PFN_cuMemPoolSetAttribute_v11020 +#define PFN_cuMemPoolGetAttribute PFN_cuMemPoolGetAttribute_v11020 +#define PFN_cuMemPoolSetAccess PFN_cuMemPoolSetAccess_v11020 +#define PFN_cuMemPoolGetAccess PFN_cuMemPoolGetAccess_v11020 +#define PFN_cuMemPoolCreate PFN_cuMemPoolCreate_v11020 +#define PFN_cuMemPoolDestroy PFN_cuMemPoolDestroy_v11020 +#define PFN_cuMemAllocFromPoolAsync __API_TYPEDEF_PTSZ(PFN_cuMemAllocFromPoolAsync, 11020, 11020) +#define PFN_cuMemPoolExportToShareableHandle PFN_cuMemPoolExportToShareableHandle_v11020 +#define PFN_cuMemPoolImportFromShareableHandle PFN_cuMemPoolImportFromShareableHandle_v11020 +#define PFN_cuMemPoolExportPointer PFN_cuMemPoolExportPointer_v11020 +#define PFN_cuMemPoolImportPointer PFN_cuMemPoolImportPointer_v11020 +#define PFN_cuPointerGetAttribute PFN_cuPointerGetAttribute_v4000 +#define PFN_cuMemPrefetchAsync __API_TYPEDEF_PTSZ(PFN_cuMemPrefetchAsync, 8000, 8000) +#define PFN_cuMemAdvise PFN_cuMemAdvise_v8000 +#define PFN_cuMemRangeGetAttribute PFN_cuMemRangeGetAttribute_v8000 +#define PFN_cuMemRangeGetAttributes PFN_cuMemRangeGetAttributes_v8000 +#define PFN_cuMulticastCreate PFN_cuMulticastCreate_v12010 +#define PFN_cuMulticastAddDevice PFN_cuMulticastAddDevice_v12010 +#define PFN_cuMulticastBindMem PFN_cuMulticastBindMem_v12010 +#define PFN_cuMulticastBindAddr PFN_cuMulticastBindAddr_v12010 +#define PFN_cuMulticastUnbind PFN_cuMulticastUnbind_v12010 +#define PFN_cuMulticastGetGranularity PFN_cuMulticastGetGranularity_v12010 +#define PFN_cuPointerSetAttribute PFN_cuPointerSetAttribute_v6000 +#define PFN_cuPointerGetAttributes PFN_cuPointerGetAttributes_v7000 +#define PFN_cuStreamCreate PFN_cuStreamCreate_v2000 +#define PFN_cuStreamCreateWithPriority PFN_cuStreamCreateWithPriority_v5050 +#define PFN_cuStreamGetId __API_TYPEDEF_PTSZ(PFN_cuStreamGetId, 12000, 12000) +#define PFN_cuStreamGetPriority __API_TYPEDEF_PTSZ(PFN_cuStreamGetPriority, 5050, 7000) +#define PFN_cuStreamGetFlags __API_TYPEDEF_PTSZ(PFN_cuStreamGetFlags, 5050, 7000) +#define PFN_cuStreamGetCtx __API_TYPEDEF_PTSZ(PFN_cuStreamGetCtx, 9020, 9020) +#define PFN_cuStreamWaitEvent __API_TYPEDEF_PTSZ(PFN_cuStreamWaitEvent, 3020, 7000) +#define PFN_cuStreamAddCallback __API_TYPEDEF_PTSZ(PFN_cuStreamAddCallback, 5000, 7000) +#define PFN_cuStreamBeginCapture __API_TYPEDEF_PTSZ(PFN_cuStreamBeginCapture, 10010, 10010) +#define PFN_cuThreadExchangeStreamCaptureMode PFN_cuThreadExchangeStreamCaptureMode_v10010 
+#define PFN_cuStreamEndCapture __API_TYPEDEF_PTSZ(PFN_cuStreamEndCapture, 10000, 10000) +#define PFN_cuStreamIsCapturing __API_TYPEDEF_PTSZ(PFN_cuStreamIsCapturing, 10000, 10000) +#define PFN_cuStreamGetCaptureInfo __API_TYPEDEF_PTSZ(PFN_cuStreamGetCaptureInfo, 10010, 10010) +#define PFN_cuStreamGetCaptureInfo_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamGetCaptureInfo, 11030, 11030) +#define PFN_cuStreamUpdateCaptureDependencies __API_TYPEDEF_PTSZ(PFN_cuStreamUpdateCaptureDependencies, 11030, 11030) +#define PFN_cuStreamAttachMemAsync __API_TYPEDEF_PTSZ(PFN_cuStreamAttachMemAsync, 6000, 7000) +#define PFN_cuStreamQuery __API_TYPEDEF_PTSZ(PFN_cuStreamQuery, 2000, 7000) +#define PFN_cuStreamSynchronize __API_TYPEDEF_PTSZ(PFN_cuStreamSynchronize, 2000, 7000) +#define PFN_cuStreamDestroy PFN_cuStreamDestroy_v4000 +#define PFN_cuStreamCopyAttributes __API_TYPEDEF_PTSZ(PFN_cuStreamCopyAttributes, 11000, 11000) +#define PFN_cuStreamGetAttribute __API_TYPEDEF_PTSZ(PFN_cuStreamGetAttribute, 11000, 11000) +#define PFN_cuStreamSetAttribute __API_TYPEDEF_PTSZ(PFN_cuStreamSetAttribute, 11000, 11000) +#define PFN_cuEventCreate PFN_cuEventCreate_v2000 +#define PFN_cuEventRecord __API_TYPEDEF_PTSZ(PFN_cuEventRecord, 2000, 7000) +#define PFN_cuEventRecordWithFlags __API_TYPEDEF_PTSZ(PFN_cuEventRecordWithFlags, 11010, 11010) +#define PFN_cuEventQuery PFN_cuEventQuery_v2000 +#define PFN_cuEventSynchronize PFN_cuEventSynchronize_v2000 +#define PFN_cuEventDestroy PFN_cuEventDestroy_v4000 +#define PFN_cuEventElapsedTime PFN_cuEventElapsedTime_v2000 +#define PFN_cuImportExternalMemory PFN_cuImportExternalMemory_v10000 +#define PFN_cuExternalMemoryGetMappedBuffer PFN_cuExternalMemoryGetMappedBuffer_v10000 +#define PFN_cuExternalMemoryGetMappedMipmappedArray PFN_cuExternalMemoryGetMappedMipmappedArray_v10000 +#define PFN_cuDestroyExternalMemory PFN_cuDestroyExternalMemory_v10000 +#define PFN_cuImportExternalSemaphore PFN_cuImportExternalSemaphore_v10000 +#define PFN_cuSignalExternalSemaphoresAsync __API_TYPEDEF_PTSZ(PFN_cuSignalExternalSemaphoresAsync, 10000, 10000) +#define PFN_cuWaitExternalSemaphoresAsync __API_TYPEDEF_PTSZ(PFN_cuWaitExternalSemaphoresAsync, 10000, 10000) +#define PFN_cuDestroyExternalSemaphore PFN_cuDestroyExternalSemaphore_v10000 +#define PFN_cuStreamWaitValue32 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue32, 8000, 8000) +#define PFN_cuStreamWaitValue64 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue64, 9000, 9000) +#define PFN_cuStreamWriteValue32 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue32, 8000, 8000) +#define PFN_cuStreamWriteValue64 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue64, 9000, 9000) +#define PFN_cuStreamBatchMemOp __API_TYPEDEF_PTSZ(PFN_cuStreamBatchMemOp, 8000, 8000) +#define PFN_cuStreamWaitValue32_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue32, 11070, 11070) +#define PFN_cuStreamWaitValue64_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue64, 11070, 11070) +#define PFN_cuStreamWriteValue32_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue32, 11070, 11070) +#define PFN_cuStreamWriteValue64_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue64, 11070, 11070) +#define PFN_cuStreamBatchMemOp_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamBatchMemOp, 11070, 11070) +#define PFN_cuFuncGetAttribute PFN_cuFuncGetAttribute_v2020 +#define PFN_cuFuncSetAttribute PFN_cuFuncSetAttribute_v9000 +#define PFN_cuFuncSetCacheConfig PFN_cuFuncSetCacheConfig_v3000 +#define PFN_cuFuncSetSharedMemConfig PFN_cuFuncSetSharedMemConfig_v4020 +#define PFN_cuLaunchKernel __API_TYPEDEF_PTSZ(PFN_cuLaunchKernel, 4000, 7000) +#define PFN_cuLaunchKernelEx 
__API_TYPEDEF_PTSZ(PFN_cuLaunchKernelEx, 11060, 11060) +#define PFN_cuLaunchCooperativeKernel __API_TYPEDEF_PTSZ(PFN_cuLaunchCooperativeKernel, 9000, 9000) +#define PFN_cuLaunchCooperativeKernelMultiDevice PFN_cuLaunchCooperativeKernelMultiDevice_v9000 +#define PFN_cuLaunchHostFunc __API_TYPEDEF_PTSZ(PFN_cuLaunchHostFunc, 10000, 10000) +#define PFN_cuFuncSetBlockShape PFN_cuFuncSetBlockShape_v2000 +#define PFN_cuFuncSetSharedSize PFN_cuFuncSetSharedSize_v2000 +#define PFN_cuParamSetSize PFN_cuParamSetSize_v2000 +#define PFN_cuParamSeti PFN_cuParamSeti_v2000 +#define PFN_cuParamSetf PFN_cuParamSetf_v2000 +#define PFN_cuParamSetv PFN_cuParamSetv_v2000 +#define PFN_cuLaunch PFN_cuLaunch_v2000 +#define PFN_cuLaunchGrid PFN_cuLaunchGrid_v2000 +#define PFN_cuLaunchGridAsync PFN_cuLaunchGridAsync_v2000 +#define PFN_cuParamSetTexRef PFN_cuParamSetTexRef_v2000 +#define PFN_cuGraphCreate PFN_cuGraphCreate_v10000 +#define PFN_cuGraphAddKernelNode PFN_cuGraphAddKernelNode_v12000 +#define PFN_cuGraphKernelNodeGetParams PFN_cuGraphKernelNodeGetParams_v12000 +#define PFN_cuGraphKernelNodeSetParams PFN_cuGraphKernelNodeSetParams_v12000 +#define PFN_cuGraphAddMemcpyNode PFN_cuGraphAddMemcpyNode_v10000 +#define PFN_cuGraphMemcpyNodeGetParams PFN_cuGraphMemcpyNodeGetParams_v10000 +#define PFN_cuGraphMemcpyNodeSetParams PFN_cuGraphMemcpyNodeSetParams_v10000 +#define PFN_cuGraphAddMemsetNode PFN_cuGraphAddMemsetNode_v10000 +#define PFN_cuGraphMemsetNodeGetParams PFN_cuGraphMemsetNodeGetParams_v10000 +#define PFN_cuGraphMemsetNodeSetParams PFN_cuGraphMemsetNodeSetParams_v10000 +#define PFN_cuGraphAddHostNode PFN_cuGraphAddHostNode_v10000 +#define PFN_cuGraphHostNodeGetParams PFN_cuGraphHostNodeGetParams_v10000 +#define PFN_cuGraphHostNodeSetParams PFN_cuGraphHostNodeSetParams_v10000 +#define PFN_cuGraphAddChildGraphNode PFN_cuGraphAddChildGraphNode_v10000 +#define PFN_cuGraphChildGraphNodeGetGraph PFN_cuGraphChildGraphNodeGetGraph_v10000 +#define PFN_cuGraphAddEmptyNode PFN_cuGraphAddEmptyNode_v10000 +#define PFN_cuGraphAddEventRecordNode PFN_cuGraphAddEventRecordNode_v11010 +#define PFN_cuGraphEventRecordNodeGetEvent PFN_cuGraphEventRecordNodeGetEvent_v11010 +#define PFN_cuGraphEventRecordNodeSetEvent PFN_cuGraphEventRecordNodeSetEvent_v11010 +#define PFN_cuGraphAddEventWaitNode PFN_cuGraphAddEventWaitNode_v11010 +#define PFN_cuGraphEventWaitNodeGetEvent PFN_cuGraphEventWaitNodeGetEvent_v11010 +#define PFN_cuGraphEventWaitNodeSetEvent PFN_cuGraphEventWaitNodeSetEvent_v11010 +#define PFN_cuGraphAddExternalSemaphoresSignalNode PFN_cuGraphAddExternalSemaphoresSignalNode_v11020 +#define PFN_cuGraphExternalSemaphoresSignalNodeGetParams PFN_cuGraphExternalSemaphoresSignalNodeGetParams_v11020 +#define PFN_cuGraphExternalSemaphoresSignalNodeSetParams PFN_cuGraphExternalSemaphoresSignalNodeSetParams_v11020 +#define PFN_cuGraphAddExternalSemaphoresWaitNode PFN_cuGraphAddExternalSemaphoresWaitNode_v11020 +#define PFN_cuGraphExternalSemaphoresWaitNodeGetParams PFN_cuGraphExternalSemaphoresWaitNodeGetParams_v11020 +#define PFN_cuGraphExternalSemaphoresWaitNodeSetParams PFN_cuGraphExternalSemaphoresWaitNodeSetParams_v11020 +#define PFN_cuGraphAddBatchMemOpNode PFN_cuGraphAddBatchMemOpNode_v11070 +#define PFN_cuGraphBatchMemOpNodeGetParams PFN_cuGraphBatchMemOpNodeGetParams_v11070 +#define PFN_cuGraphBatchMemOpNodeSetParams PFN_cuGraphBatchMemOpNodeSetParams_v11070 +#define PFN_cuGraphExecBatchMemOpNodeSetParams PFN_cuGraphExecBatchMemOpNodeSetParams_v11070 +#define PFN_cuGraphClone PFN_cuGraphClone_v10000 +#define 
PFN_cuGraphNodeFindInClone PFN_cuGraphNodeFindInClone_v10000 +#define PFN_cuGraphNodeGetType PFN_cuGraphNodeGetType_v10000 +#define PFN_cuGraphGetNodes PFN_cuGraphGetNodes_v10000 +#define PFN_cuGraphGetRootNodes PFN_cuGraphGetRootNodes_v10000 +#define PFN_cuGraphGetEdges PFN_cuGraphGetEdges_v10000 +#define PFN_cuGraphNodeGetDependencies PFN_cuGraphNodeGetDependencies_v10000 +#define PFN_cuGraphNodeGetDependentNodes PFN_cuGraphNodeGetDependentNodes_v10000 +#define PFN_cuGraphAddDependencies PFN_cuGraphAddDependencies_v10000 +#define PFN_cuGraphRemoveDependencies PFN_cuGraphRemoveDependencies_v10000 +#define PFN_cuGraphDestroyNode PFN_cuGraphDestroyNode_v10000 + +#define PFN_cuGraphInstantiate PFN_cuGraphInstantiateWithFlags_v11040 + +#define PFN_cuGraphInstantiateWithFlags PFN_cuGraphInstantiateWithFlags_v11040 +#define PFN_cuGraphInstantiateWithParams __API_TYPEDEF_PTSZ(PFN_cuGraphInstantiateWithParams, 12000, 12000) +#define PFN_cuGraphExecGetFlags PFN_cuGraphExecGetFlags_v12000 +#define PFN_cuGraphExecKernelNodeSetParams PFN_cuGraphExecKernelNodeSetParams_v12000 +#define PFN_cuGraphExecMemcpyNodeSetParams PFN_cuGraphExecMemcpyNodeSetParams_v10020 +#define PFN_cuGraphExecMemsetNodeSetParams PFN_cuGraphExecMemsetNodeSetParams_v10020 +#define PFN_cuGraphExecHostNodeSetParams PFN_cuGraphExecHostNodeSetParams_v10020 +#define PFN_cuGraphExecChildGraphNodeSetParams PFN_cuGraphExecChildGraphNodeSetParams_v11010 +#define PFN_cuGraphExecEventRecordNodeSetEvent PFN_cuGraphExecEventRecordNodeSetEvent_v11010 +#define PFN_cuGraphExecEventWaitNodeSetEvent PFN_cuGraphExecEventWaitNodeSetEvent_v11010 +#define PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams_v11020 +#define PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams_v11020 +#define PFN_cuGraphUpload __API_TYPEDEF_PTSZ(PFN_cuGraphUpload, 11010, 11010) +#define PFN_cuGraphLaunch __API_TYPEDEF_PTSZ(PFN_cuGraphLaunch, 10000, 10000) +#define PFN_cuGraphExecDestroy PFN_cuGraphExecDestroy_v10000 +#define PFN_cuGraphDestroy PFN_cuGraphDestroy_v10000 +#define PFN_cuGraphExecUpdate PFN_cuGraphExecUpdate_v12000 +#define PFN_cuGraphKernelNodeCopyAttributes PFN_cuGraphKernelNodeCopyAttributes_v11000 +#define PFN_cuGraphKernelNodeGetAttribute PFN_cuGraphKernelNodeGetAttribute_v11000 +#define PFN_cuGraphKernelNodeSetAttribute PFN_cuGraphKernelNodeSetAttribute_v11000 +#define PFN_cuGraphDebugDotPrint PFN_cuGraphDebugDotPrint_v11030 +#define PFN_cuGraphAddMemAllocNode PFN_cuGraphAddMemAllocNode_v11040 +#define PFN_cuGraphMemAllocNodeGetParams PFN_cuGraphMemAllocNodeGetParams_v11040 +#define PFN_cuGraphAddMemFreeNode PFN_cuGraphAddMemFreeNode_v11040 +#define PFN_cuGraphMemFreeNodeGetParams PFN_cuGraphMemFreeNodeGetParams_v11040 +#define PFN_cuGraphNodeSetEnabled PFN_cuGraphNodeSetEnabled_v11060 +#define PFN_cuGraphNodeGetEnabled PFN_cuGraphNodeGetEnabled_v11060 +#define PFN_cuDeviceGraphMemTrim PFN_cuDeviceGraphMemTrim_v11040 +#define PFN_cuDeviceGetGraphMemAttribute PFN_cuDeviceGetGraphMemAttribute_v11040 +#define PFN_cuDeviceSetGraphMemAttribute PFN_cuDeviceSetGraphMemAttribute_v11040 +#define PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor_v6050 +#define PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 +#define PFN_cuOccupancyMaxPotentialBlockSize PFN_cuOccupancyMaxPotentialBlockSize_v6050 +#define 
PFN_cuOccupancyMaxPotentialBlockSizeWithFlags PFN_cuOccupancyMaxPotentialBlockSizeWithFlags_v7000 +#define PFN_cuOccupancyAvailableDynamicSMemPerBlock PFN_cuOccupancyAvailableDynamicSMemPerBlock_v10020 +#define PFN_cuOccupancyMaxPotentialClusterSize PFN_cuOccupancyMaxPotentialClusterSize_v11070 +#define PFN_cuOccupancyMaxActiveClusters PFN_cuOccupancyMaxActiveClusters_v11070 +#define PFN_cuTexRefSetArray PFN_cuTexRefSetArray_v2000 +#define PFN_cuTexRefSetMipmappedArray PFN_cuTexRefSetMipmappedArray_v5000 +#define PFN_cuTexRefSetAddress PFN_cuTexRefSetAddress_v3020 +#define PFN_cuTexRefSetAddress2D PFN_cuTexRefSetAddress2D_v4010 +#define PFN_cuTexRefSetFormat PFN_cuTexRefSetFormat_v2000 +#define PFN_cuTexRefSetAddressMode PFN_cuTexRefSetAddressMode_v2000 +#define PFN_cuTexRefSetFilterMode PFN_cuTexRefSetFilterMode_v2000 +#define PFN_cuTexRefSetMipmapFilterMode PFN_cuTexRefSetMipmapFilterMode_v5000 +#define PFN_cuTexRefSetMipmapLevelBias PFN_cuTexRefSetMipmapLevelBias_v5000 +#define PFN_cuTexRefSetMipmapLevelClamp PFN_cuTexRefSetMipmapLevelClamp_v5000 +#define PFN_cuTexRefSetMaxAnisotropy PFN_cuTexRefSetMaxAnisotropy_v5000 +#define PFN_cuTexRefSetBorderColor PFN_cuTexRefSetBorderColor_v8000 +#define PFN_cuTexRefSetFlags PFN_cuTexRefSetFlags_v2000 +#define PFN_cuTexRefGetAddress PFN_cuTexRefGetAddress_v3020 +#define PFN_cuTexRefGetArray PFN_cuTexRefGetArray_v2000 +#define PFN_cuTexRefGetMipmappedArray PFN_cuTexRefGetMipmappedArray_v5000 +#define PFN_cuTexRefGetAddressMode PFN_cuTexRefGetAddressMode_v2000 +#define PFN_cuTexRefGetFilterMode PFN_cuTexRefGetFilterMode_v2000 +#define PFN_cuTexRefGetFormat PFN_cuTexRefGetFormat_v2000 +#define PFN_cuTexRefGetMipmapFilterMode PFN_cuTexRefGetMipmapFilterMode_v5000 +#define PFN_cuTexRefGetMipmapLevelBias PFN_cuTexRefGetMipmapLevelBias_v5000 +#define PFN_cuTexRefGetMipmapLevelClamp PFN_cuTexRefGetMipmapLevelClamp_v5000 +#define PFN_cuTexRefGetMaxAnisotropy PFN_cuTexRefGetMaxAnisotropy_v5000 +#define PFN_cuTexRefGetBorderColor PFN_cuTexRefGetBorderColor_v8000 +#define PFN_cuTexRefGetFlags PFN_cuTexRefGetFlags_v2000 +#define PFN_cuTexRefCreate PFN_cuTexRefCreate_v2000 +#define PFN_cuTexRefDestroy PFN_cuTexRefDestroy_v2000 +#define PFN_cuSurfRefSetArray PFN_cuSurfRefSetArray_v3000 +#define PFN_cuSurfRefGetArray PFN_cuSurfRefGetArray_v3000 +#define PFN_cuTexObjectCreate PFN_cuTexObjectCreate_v5000 +#define PFN_cuTexObjectDestroy PFN_cuTexObjectDestroy_v5000 +#define PFN_cuTexObjectGetResourceDesc PFN_cuTexObjectGetResourceDesc_v5000 +#define PFN_cuTexObjectGetTextureDesc PFN_cuTexObjectGetTextureDesc_v5000 +#define PFN_cuTexObjectGetResourceViewDesc PFN_cuTexObjectGetResourceViewDesc_v5000 +#define PFN_cuSurfObjectCreate PFN_cuSurfObjectCreate_v5000 +#define PFN_cuSurfObjectDestroy PFN_cuSurfObjectDestroy_v5000 +#define PFN_cuSurfObjectGetResourceDesc PFN_cuSurfObjectGetResourceDesc_v5000 +#define PFN_cuTensorMapEncodeTiled PFN_cuTensorMapEncodeTiled_v12000 +#define PFN_cuTensorMapEncodeIm2col PFN_cuTensorMapEncodeIm2col_v12000 +#define PFN_cuTensorMapReplaceAddress PFN_cuTensorMapReplaceAddress_v12000 +#define PFN_cuDeviceCanAccessPeer PFN_cuDeviceCanAccessPeer_v4000 +#define PFN_cuCtxEnablePeerAccess PFN_cuCtxEnablePeerAccess_v4000 +#define PFN_cuCtxDisablePeerAccess PFN_cuCtxDisablePeerAccess_v4000 +#define PFN_cuDeviceGetP2PAttribute PFN_cuDeviceGetP2PAttribute_v8000 +#define PFN_cuGraphicsUnregisterResource PFN_cuGraphicsUnregisterResource_v3000 +#define PFN_cuGraphicsSubResourceGetMappedArray PFN_cuGraphicsSubResourceGetMappedArray_v3000 +#define 
PFN_cuGraphicsResourceGetMappedMipmappedArray PFN_cuGraphicsResourceGetMappedMipmappedArray_v5000 +#define PFN_cuGraphicsResourceGetMappedPointer PFN_cuGraphicsResourceGetMappedPointer_v3020 +#define PFN_cuGraphicsResourceSetMapFlags PFN_cuGraphicsResourceSetMapFlags_v6050 +#define PFN_cuGraphicsMapResources __API_TYPEDEF_PTSZ(PFN_cuGraphicsMapResources, 3000, 7000) +#define PFN_cuGraphicsUnmapResources __API_TYPEDEF_PTSZ(PFN_cuGraphicsUnmapResources, 3000, 7000) +#define PFN_cuGetExportTable PFN_cuGetExportTable_v3000 +#define PFN_cuFuncGetModule PFN_cuFuncGetModule_v11000 +#define PFN_cuFlushGPUDirectRDMAWrites PFN_cuFlushGPUDirectRDMAWrites_v11030 +#define PFN_cuGetProcAddress PFN_cuGetProcAddress_v12000 +#define PFN_cuUserObjectCreate PFN_cuUserObjectCreate_v11030 +#define PFN_cuUserObjectRetain PFN_cuUserObjectRetain_v11030 +#define PFN_cuUserObjectRelease PFN_cuUserObjectRelease_v11030 +#define PFN_cuGraphRetainUserObject PFN_cuGraphRetainUserObject_v11030 +#define PFN_cuGraphReleaseUserObject PFN_cuGraphReleaseUserObject_v11030 +#define PFN_cuModuleGetLoadingMode PFN_cuModuleGetLoadingMode_v11070 +#define PFN_cuMemGetHandleForAddressRange PFN_cuMemGetHandleForAddressRange_v11070 +#define PFN_cuLibraryLoadData PFN_cuLibraryLoadData_v12000 +#define PFN_cuLibraryLoadFromFile PFN_cuLibraryLoadFromFile_v12000 +#define PFN_cuLibraryUnload PFN_cuLibraryUnload_v12000 +#define PFN_cuLibraryGetKernel PFN_cuLibraryGetKernel_v12000 +#define PFN_cuLibraryGetModule PFN_cuLibraryGetModule_v12000 +#define PFN_cuKernelGetFunction PFN_cuKernelGetFunction_v12000 +#define PFN_cuLibraryGetGlobal PFN_cuLibraryGetGlobal_v12000 +#define PFN_cuLibraryGetManaged PFN_cuLibraryGetManaged_v12000 +#define PFN_cuKernelGetAttribute PFN_cuKernelGetAttribute_v12000 +#define PFN_cuKernelSetAttribute PFN_cuKernelSetAttribute_v12000 +#define PFN_cuKernelSetCacheConfig PFN_cuKernelSetCacheConfig_v12000 +#define PFN_cuLibraryGetUnifiedFunction PFN_cuLibraryGetUnifiedFunction_v12000 +#define PFN_cuCoredumpGetAttribute PFN_cuCoredumpGetAttribute_v12010 +#define PFN_cuCoredumpGetAttributeGlobal PFN_cuCoredumpGetAttributeGlobal_v12010 +#define PFN_cuCoredumpSetAttribute PFN_cuCoredumpSetAttribute_v12010 +#define PFN_cuCoredumpSetAttributeGlobal PFN_cuCoredumpSetAttributeGlobal_v12010 + + +/* + * Type definitions for functions defined in cuda.h + */ +typedef CUresult (CUDAAPI *PFN_cuGetErrorString_v6000)(CUresult error, const char **pStr); +typedef CUresult (CUDAAPI *PFN_cuGetErrorName_v6000)(CUresult error, const char **pStr); +typedef CUresult (CUDAAPI *PFN_cuInit_v2000)(unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuDriverGetVersion_v2020)(int *driverVersion); +typedef CUresult (CUDAAPI *PFN_cuDeviceGet_v2000)(CUdevice_v1 *device, int ordinal); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetCount_v2000)(int *count); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetName_v2000)(char *name, int len, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetUuid_v9020)(CUuuid *uuid, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetUuid_v11040)(CUuuid *uuid, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetLuid_v10000)(char *luid, unsigned int *deviceNodeMask, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceTotalMem_v3020)(size_t *bytes, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetTexture1DLinearMaxWidth_v11010)(size_t *maxWidthInElements, CUarray_format format, unsigned numChannels, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetAttribute_v2000)(int *pi, 
CUdevice_attribute attrib, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetNvSciSyncAttributes_v10020)(void *nvSciSyncAttrList, CUdevice_v1 dev, int flags); +typedef CUresult (CUDAAPI *PFN_cuDeviceSetMemPool_v11020)(CUdevice_v1 dev, CUmemoryPool pool); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetMemPool_v11020)(CUmemoryPool *pool, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetDefaultMemPool_v11020)(CUmemoryPool *pool_out, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetProperties_v2000)(CUdevprop_v1 *prop, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceComputeCapability_v2000)(int *major, int *minor, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRetain_v7000)(CUcontext *pctx, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRelease_v11000)(CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxSetFlags_v11000)(CUdevice_v1 dev, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxGetState_v7000)(CUdevice_v1 dev, unsigned int *flags, int *active); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxReset_v11000)(CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetExecAffinitySupport_v11040)(int *pi, CUexecAffinityType type, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v3020)(CUcontext *pctx, unsigned int flags, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v11040)(CUcontext *pctx, CUexecAffinityParam *paramsArray, int numParams, unsigned int flags, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuCtxGetId_v12000)(CUcontext ctx, unsigned long long *ctxId); +typedef CUresult (CUDAAPI *PFN_cuCtxDestroy_v4000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxPushCurrent_v4000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxPopCurrent_v4000)(CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuCtxSetCurrent_v4000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxGetCurrent_v4000)(CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuCtxGetDevice_v2000)(CUdevice_v1 *device); +typedef CUresult (CUDAAPI *PFN_cuCtxGetFlags_v7000)(unsigned int *flags); +typedef CUresult (CUDAAPI *PFN_cuCtxSetFlags_v12010)(unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuCtxSynchronize_v2000)(void); +typedef CUresult (CUDAAPI *PFN_cuCtxSetLimit_v3010)(CUlimit limit, size_t value); +typedef CUresult (CUDAAPI *PFN_cuCtxGetLimit_v3010)(size_t *pvalue, CUlimit limit); +typedef CUresult (CUDAAPI *PFN_cuCtxGetCacheConfig_v3020)(CUfunc_cache *pconfig); +typedef CUresult (CUDAAPI *PFN_cuCtxSetCacheConfig_v3020)(CUfunc_cache config); +typedef CUresult (CUDAAPI *PFN_cuCtxGetSharedMemConfig_v4020)(CUsharedconfig *pConfig); +typedef CUresult (CUDAAPI *PFN_cuCtxSetSharedMemConfig_v4020)(CUsharedconfig config); +typedef CUresult (CUDAAPI *PFN_cuCtxGetApiVersion_v3020)(CUcontext ctx, unsigned int *version); +typedef CUresult (CUDAAPI *PFN_cuCtxGetStreamPriorityRange_v5050)(int *leastPriority, int *greatestPriority); +typedef CUresult (CUDAAPI *PFN_cuCtxResetPersistingL2Cache_v11000)(void); +typedef CUresult (CUDAAPI *PFN_cuCtxAttach_v2000)(CUcontext *pctx, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuCtxDetach_v2000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxGetExecAffinity_v11040)(CUexecAffinityParam *pExecAffinity, CUexecAffinityType type); +typedef CUresult (CUDAAPI *PFN_cuModuleLoad_v2000)(CUmodule *module, const char *fname); +typedef CUresult (CUDAAPI *PFN_cuModuleLoadData_v2000)(CUmodule *module, const void *image); +typedef 
CUresult (CUDAAPI *PFN_cuModuleLoadDataEx_v2010)(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); +typedef CUresult (CUDAAPI *PFN_cuModuleLoadFatBinary_v2000)(CUmodule *module, const void *fatCubin); +typedef CUresult (CUDAAPI *PFN_cuModuleUnload_v2000)(CUmodule hmod); +typedef CUresult (CUDAAPI *PFN_cuModuleGetFunction_v2000)(CUfunction *hfunc, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuModuleGetGlobal_v3020)(CUdeviceptr_v2 *dptr, size_t *bytes, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuModuleGetTexRef_v2000)(CUtexref *pTexRef, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuModuleGetSurfRef_v3000)(CUsurfref *pSurfRef, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuLinkCreate_v6050)(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); +typedef CUresult (CUDAAPI *PFN_cuLinkAddData_v6050)(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, unsigned int numOptions, CUjit_option *options, void **optionValues); +typedef CUresult (CUDAAPI *PFN_cuLinkAddFile_v6050)(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues); +typedef CUresult (CUDAAPI *PFN_cuLinkComplete_v5050)(CUlinkState state, void **cubinOut, size_t *sizeOut); +typedef CUresult (CUDAAPI *PFN_cuLinkDestroy_v5050)(CUlinkState state); +typedef CUresult (CUDAAPI *PFN_cuMemGetInfo_v3020)(size_t *free, size_t *total); +typedef CUresult (CUDAAPI *PFN_cuMemAlloc_v3020)(CUdeviceptr_v2 *dptr, size_t bytesize); +typedef CUresult (CUDAAPI *PFN_cuMemAllocPitch_v3020)(CUdeviceptr_v2 *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes); +typedef CUresult (CUDAAPI *PFN_cuMemFree_v3020)(CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuMemGetAddressRange_v3020)(CUdeviceptr_v2 *pbase, size_t *psize, CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuMemAllocHost_v3020)(void **pp, size_t bytesize); +typedef CUresult (CUDAAPI *PFN_cuMemFreeHost_v2000)(void *p); +typedef CUresult (CUDAAPI *PFN_cuMemHostAlloc_v2020)(void **pp, size_t bytesize, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuMemHostGetDevicePointer_v3020)(CUdeviceptr_v2 *pdptr, void *p, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuMemHostGetFlags_v2030)(unsigned int *pFlags, void *p); +typedef CUresult (CUDAAPI *PFN_cuMemAllocManaged_v6000)(CUdeviceptr_v2 *dptr, size_t bytesize, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetByPCIBusId_v4010)(CUdevice_v1 *dev, const char *pciBusId); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetPCIBusId_v4010)(char *pciBusId, int len, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuIpcGetEventHandle_v4010)(CUipcEventHandle_v1 *pHandle, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuIpcOpenEventHandle_v4010)(CUevent *phEvent, CUipcEventHandle_v1 handle); +typedef CUresult (CUDAAPI *PFN_cuIpcGetMemHandle_v4010)(CUipcMemHandle_v1 *pHandle, CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuIpcOpenMemHandle_v11000)(CUdeviceptr_v2 *pdptr, CUipcMemHandle_v1 handle, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuIpcCloseMemHandle_v4010)(CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuMemHostRegister_v6050)(void *p, size_t bytesize, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuMemHostUnregister_v4000)(void *p); +typedef CUresult (CUDAAPI 
*PFN_cuMemcpy_v7000_ptds)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeer_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v7000_ptds)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v7000_ptds)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v7000_ptds)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v7000_ptds)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v7000_ptds)(const CUDA_MEMCPY3D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeer_v7000_ptds)(const CUDA_MEMCPY3D_PEER_v1 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAsync_v7000_ptsz)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeerAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v7000_ptsz)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v7000_ptsz)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v7000_ptsz)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v7000_ptsz)(const CUDA_MEMCPY2D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v7000_ptsz)(const CUDA_MEMCPY3D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeerAsync_v7000_ptsz)(const CUDA_MEMCPY3D_PEER_v1 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, 
size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuArrayCreate_v3020)(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v2 *pAllocateArray); +typedef CUresult (CUDAAPI *PFN_cuArrayGetDescriptor_v3020)(CUDA_ARRAY_DESCRIPTOR_v2 *pArrayDescriptor, CUarray hArray); +typedef CUresult (CUDAAPI *PFN_cuArrayGetSparseProperties_v11010)(CUDA_ARRAY_SPARSE_PROPERTIES_v1 *sparseProperties, CUarray array); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetSparseProperties_v11010)(CUDA_ARRAY_SPARSE_PROPERTIES_v1 *sparseProperties, CUmipmappedArray mipmap); +typedef CUresult (CUDAAPI *PFN_cuArrayGetMemoryRequirements_v11060)(CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 *memoryRequirements, CUarray array, CUdevice device); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetMemoryRequirements_v11060)(CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 *memoryRequirements, CUmipmappedArray mipmap, CUdevice device); +typedef CUresult (CUDAAPI *PFN_cuArrayGetPlane_v11020)(CUarray *pPlaneArray, CUarray hArray, unsigned int planeIdx); +typedef CUresult (CUDAAPI *PFN_cuArrayDestroy_v2000)(CUarray hArray); +typedef CUresult (CUDAAPI *PFN_cuArray3DCreate_v3020)(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v2 *pAllocateArray); +typedef CUresult (CUDAAPI *PFN_cuArray3DGetDescriptor_v3020)(CUDA_ARRAY3D_DESCRIPTOR_v2 *pArrayDescriptor, CUarray hArray); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayCreate_v5000)(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v2 *pMipmappedArrayDesc, unsigned int numMipmapLevels); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetLevel_v5000)(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayDestroy_v5000)(CUmipmappedArray hMipmappedArray); +typedef CUresult (CUDAAPI *PFN_cuMemAddressReserve_v10020)(CUdeviceptr_v2 *ptr, size_t size, size_t alignment, CUdeviceptr_v2 addr, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemAddressFree_v10020)(CUdeviceptr_v2 ptr, size_t size); +typedef CUresult (CUDAAPI *PFN_cuMemCreate_v10020)(CUmemGenericAllocationHandle_v1 *handle, size_t size, const CUmemAllocationProp_v1 *prop, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemRelease_v10020)(CUmemGenericAllocationHandle_v1 handle); +typedef CUresult (CUDAAPI *PFN_cuMemMap_v10020)(CUdeviceptr_v2 ptr, size_t size, size_t offset, 
CUmemGenericAllocationHandle_v1 handle, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemMapArrayAsync_v11010_ptsz)(CUarrayMapInfo_v1 *mapInfoList, unsigned int count, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemUnmap_v10020)(CUdeviceptr_v2 ptr, size_t size); +typedef CUresult (CUDAAPI *PFN_cuMemSetAccess_v10020)(CUdeviceptr_v2 ptr, size_t size, const CUmemAccessDesc_v1 *desc, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMemGetAccess_v10020)(unsigned long long *flags, const CUmemLocation_v1 *location, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuMemExportToShareableHandle_v10020)(void *shareableHandle, CUmemGenericAllocationHandle_v1 handle, CUmemAllocationHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemImportFromShareableHandle_v10020)(CUmemGenericAllocationHandle_v1 *handle, void *osHandle, CUmemAllocationHandleType shHandleType); +typedef CUresult (CUDAAPI *PFN_cuMemGetAllocationGranularity_v10020)(size_t *granularity, const CUmemAllocationProp_v1 *prop, CUmemAllocationGranularity_flags option); +typedef CUresult (CUDAAPI *PFN_cuMemGetAllocationPropertiesFromHandle_v10020)(CUmemAllocationProp_v1 *prop, CUmemGenericAllocationHandle_v1 handle); +typedef CUresult (CUDAAPI *PFN_cuMemRetainAllocationHandle_v11000)(CUmemGenericAllocationHandle_v1 *handle, void *addr); +typedef CUresult (CUDAAPI *PFN_cuMemFreeAsync_v11020_ptsz)(CUdeviceptr_v2 dptr, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAllocAsync_v11020_ptsz)(CUdeviceptr_v2 *dptr, size_t bytesize, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemPoolTrimTo_v11020)(CUmemoryPool pool, size_t minBytesToKeep); +typedef CUresult (CUDAAPI *PFN_cuMemPoolSetAttribute_v11020)(CUmemoryPool pool, CUmemPool_attribute attr, void *value); +typedef CUresult (CUDAAPI *PFN_cuMemPoolGetAttribute_v11020)(CUmemoryPool pool, CUmemPool_attribute attr, void *value); +typedef CUresult (CUDAAPI *PFN_cuMemPoolSetAccess_v11020)(CUmemoryPool pool, const CUmemAccessDesc_v1 *map, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMemPoolGetAccess_v11020)(CUmemAccess_flags *flags, CUmemoryPool memPool, CUmemLocation_v1 *location); +typedef CUresult (CUDAAPI *PFN_cuMemPoolCreate_v11020)(CUmemoryPool *pool, const CUmemPoolProps_v1 *poolProps); +typedef CUresult (CUDAAPI *PFN_cuMemPoolDestroy_v11020)(CUmemoryPool pool); +typedef CUresult (CUDAAPI *PFN_cuMemAllocFromPoolAsync_v11020_ptsz)(CUdeviceptr_v2 *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemPoolExportToShareableHandle_v11020)(void *handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemPoolImportFromShareableHandle_v11020)(CUmemoryPool *pool_out, void *handle, CUmemAllocationHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemPoolExportPointer_v11020)(CUmemPoolPtrExportData_v1 *shareData_out, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuMemPoolImportPointer_v11020)(CUdeviceptr_v2 *ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData_v1 *shareData); +typedef CUresult (CUDAAPI *PFN_cuPointerGetAttribute_v4000)(void *data, CUpointer_attribute attribute, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuMemPrefetchAsync_v8000_ptsz)(CUdeviceptr_v2 devPtr, size_t count, CUdevice_v1 dstDevice, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAdvise_v8000)(CUdeviceptr_v2 devPtr, size_t count, CUmem_advise advice, CUdevice_v1 device); +typedef 
CUresult (CUDAAPI *PFN_cuMemRangeGetAttribute_v8000)(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr_v2 devPtr, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMemRangeGetAttributes_v8000)(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr_v2 devPtr, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMulticastCreate_v12010)(CUmemGenericAllocationHandle *mcHandle, const CUmulticastObjectProp *prop); +typedef CUresult (CUDAAPI *PFN_cuMulticastAddDevice_v12010)(CUmemGenericAllocationHandle mcHandle, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuMulticastBindMem_v12010)(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUmemGenericAllocationHandle memHandle, size_t memOffset, size_t size, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMulticastBindAddr_v12010)(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUdeviceptr memptr, size_t size, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMulticastUnbind_v12010)(CUmemGenericAllocationHandle mcHandle, CUdevice dev, size_t mcOffset, size_t size); +typedef CUresult (CUDAAPI *PFN_cuMulticastGetGranularity_v12010)(size_t *granularity, const CUmulticastObjectProp *prop, CUmulticastGranularity_flags option); +typedef CUresult (CUDAAPI *PFN_cuPointerSetAttribute_v6000)(const void *value, CUpointer_attribute attribute, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuPointerGetAttributes_v7000)(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuStreamCreate_v2000)(CUstream *phStream, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuStreamCreateWithPriority_v5050)(CUstream *phStream, unsigned int flags, int priority); +typedef CUresult (CUDAAPI *PFN_cuStreamGetId_v12000)(CUstream hStream, unsigned long long *streamId); +typedef CUresult (CUDAAPI *PFN_cuStreamGetId_v12000_ptsz)(CUstream hStream, unsigned long long *streamId); +typedef CUresult (CUDAAPI *PFN_cuStreamGetPriority_v7000_ptsz)(CUstream hStream, int *priority); +typedef CUresult (CUDAAPI *PFN_cuStreamGetFlags_v7000_ptsz)(CUstream hStream, unsigned int *flags); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCtx_v9020_ptsz)(CUstream hStream, CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitEvent_v7000_ptsz)(CUstream hStream, CUevent hEvent, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAddCallback_v7000_ptsz)(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10010_ptsz)(CUstream hStream, CUstreamCaptureMode mode); +typedef CUresult (CUDAAPI *PFN_cuThreadExchangeStreamCaptureMode_v10010)(CUstreamCaptureMode *mode); +typedef CUresult (CUDAAPI *PFN_cuStreamEndCapture_v10000_ptsz)(CUstream hStream, CUgraph *phGraph); +typedef CUresult (CUDAAPI *PFN_cuStreamIsCapturing_v10000_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v10010_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v11030_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); +typedef CUresult (CUDAAPI *PFN_cuStreamUpdateCaptureDependencies_v11030_ptsz)(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); +typedef 
CUresult (CUDAAPI *PFN_cuStreamAttachMemAsync_v7000_ptsz)(CUstream hStream, CUdeviceptr_v2 dptr, size_t length, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamQuery_v7000_ptsz)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamSynchronize_v7000_ptsz)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamDestroy_v4000)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamCopyAttributes_v11000_ptsz)(CUstream dst, CUstream src); +typedef CUresult (CUDAAPI *PFN_cuStreamGetAttribute_v11000_ptsz)(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue_v1 *value_out); +typedef CUresult (CUDAAPI *PFN_cuStreamSetAttribute_v11000_ptsz)(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue_v1 *value); +typedef CUresult (CUDAAPI *PFN_cuEventCreate_v2000)(CUevent *phEvent, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuEventRecord_v7000_ptsz)(CUevent hEvent, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuEventRecordWithFlags_v11010_ptsz)(CUevent hEvent, CUstream hStream, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuEventQuery_v2000)(CUevent hEvent); +typedef CUresult (CUDAAPI *PFN_cuEventSynchronize_v2000)(CUevent hEvent); +typedef CUresult (CUDAAPI *PFN_cuEventDestroy_v4000)(CUevent hEvent); +typedef CUresult (CUDAAPI *PFN_cuEventElapsedTime_v2000)(float *pMilliseconds, CUevent hStart, CUevent hEnd); +typedef CUresult (CUDAAPI *PFN_cuImportExternalMemory_v10000)(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 *memHandleDesc); +typedef CUresult (CUDAAPI *PFN_cuExternalMemoryGetMappedBuffer_v10000)(CUdeviceptr_v2 *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 *bufferDesc); +typedef CUresult (CUDAAPI *PFN_cuExternalMemoryGetMappedMipmappedArray_v10000)(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 *mipmapDesc); +typedef CUresult (CUDAAPI *PFN_cuDestroyExternalMemory_v10000)(CUexternalMemory extMem); +typedef CUresult (CUDAAPI *PFN_cuImportExternalSemaphore_v10000)(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 *semHandleDesc); +typedef CUresult (CUDAAPI *PFN_cuSignalExternalSemaphoresAsync_v10000_ptsz)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuWaitExternalSemaphoresAsync_v10000_ptsz)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuDestroyExternalSemaphore_v10000)(CUexternalSemaphore extSem); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v8000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v9000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v8000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v9000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v8000_ptsz)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams_v1 *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int 
flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v11070_ptsz)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuFuncGetAttribute_v2020)(int *pi, CUfunction_attribute attrib, CUfunction hfunc); +typedef CUresult (CUDAAPI *PFN_cuFuncSetAttribute_v9000)(CUfunction hfunc, CUfunction_attribute attrib, int value); +typedef CUresult (CUDAAPI *PFN_cuFuncSetCacheConfig_v3000)(CUfunction hfunc, CUfunc_cache config); +typedef CUresult (CUDAAPI *PFN_cuFuncSetSharedMemConfig_v4020)(CUfunction hfunc, CUsharedconfig config); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernel_v7000_ptsz)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernelEx_v11060_ptsz)(const CUlaunchConfig *config, CUfunction f, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernel_v9000_ptsz)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); +typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernelMultiDevice_v9000)(CUDA_LAUNCH_PARAMS_v1 *launchParamsList, unsigned int numDevices, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuLaunchHostFunc_v10000_ptsz)(CUstream hStream, CUhostFn fn, void *userData); +typedef CUresult (CUDAAPI *PFN_cuFuncSetBlockShape_v2000)(CUfunction hfunc, int x, int y, int z); +typedef CUresult (CUDAAPI *PFN_cuFuncSetSharedSize_v2000)(CUfunction hfunc, unsigned int bytes); +typedef CUresult (CUDAAPI *PFN_cuParamSetSize_v2000)(CUfunction hfunc, unsigned int numbytes); +typedef CUresult (CUDAAPI *PFN_cuParamSeti_v2000)(CUfunction hfunc, int offset, unsigned int value); +typedef CUresult (CUDAAPI *PFN_cuParamSetf_v2000)(CUfunction hfunc, int offset, float value); +typedef CUresult (CUDAAPI *PFN_cuParamSetv_v2000)(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes); +typedef CUresult (CUDAAPI *PFN_cuLaunch_v2000)(CUfunction f); +typedef CUresult (CUDAAPI *PFN_cuLaunchGrid_v2000)(CUfunction f, int grid_width, int grid_height); +typedef CUresult (CUDAAPI *PFN_cuLaunchGridAsync_v2000)(CUfunction f, int grid_width, int grid_height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuParamSetTexRef_v2000)(CUfunction hfunc, int texunit, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuGraphCreate_v10000)(CUgraph *phGraph, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphAddKernelNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetParams_v10000)(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetParams_v10000)(CUgraphNode hNode, 
const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddKernelNode_v12000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetParams_v12000)(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetParams_v12000)(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemcpyNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D_v2 *copyParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphMemcpyNodeGetParams_v10000)(CUgraphNode hNode, CUDA_MEMCPY3D_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphMemcpyNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_MEMCPY3D_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemsetNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS_v1 *memsetParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphMemsetNodeGetParams_v10000)(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphMemsetNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddHostNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphHostNodeGetParams_v10000)(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphHostNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddChildGraphNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphChildGraphNodeGetGraph_v10000)(CUgraphNode hNode, CUgraph *phGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphAddEmptyNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphAddEventRecordNode_v11010)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphEventRecordNodeGetEvent_v11010)(CUgraphNode hNode, CUevent *event_out); +typedef CUresult (CUDAAPI *PFN_cuGraphEventRecordNodeSetEvent_v11010)(CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphAddEventWaitNode_v11010)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphEventWaitNodeGetEvent_v11010)(CUgraphNode hNode, CUevent *event_out); +typedef CUresult (CUDAAPI *PFN_cuGraphEventWaitNodeSetEvent_v11010)(CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphAddExternalSemaphoresSignalNode_v11020)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresSignalNodeGetParams_v11020)(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *params_out); +typedef CUresult 
(CUDAAPI *PFN_cuGraphExternalSemaphoresSignalNodeSetParams_v11020)(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddExternalSemaphoresWaitNode_v11020)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresWaitNodeGetParams_v11020)(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *params_out); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresWaitNodeSetParams_v11020)(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddBatchMemOpNode_v11070)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphBatchMemOpNodeGetParams_v11070)(CUgraphNode hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out); +typedef CUresult (CUDAAPI *PFN_cuGraphBatchMemOpNodeSetParams_v11070)(CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecBatchMemOpNodeSetParams_v11070)(CUgraphExec graphExec, CUgraphNode node, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphClone_v10000)(CUgraph *phGraphClone, CUgraph originalGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeFindInClone_v10000)(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetType_v10000)(CUgraphNode hNode, CUgraphNodeType *type); +typedef CUresult (CUDAAPI *PFN_cuGraphGetNodes_v10000)(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes); +typedef CUresult (CUDAAPI *PFN_cuGraphGetRootNodes_v10000)(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes); +typedef CUresult (CUDAAPI *PFN_cuGraphGetEdges_v10000)(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetDependencies_v10000)(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetDependentNodes_v10000)(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes); +typedef CUresult (CUDAAPI *PFN_cuGraphAddDependencies_v10000)(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphRemoveDependencies_v10000)(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphDestroyNode_v10000)(CUgraphNode hNode); +typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithFlags_v11040)(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithParams_v12000_ptsz)(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecGetFlags_v12000)(CUgraphExec hGraphExec, cuuint64_t *flags); +typedef CUresult (CUDAAPI *PFN_cuGraphExecKernelNodeSetParams_v10010)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecKernelNodeSetParams_v12000)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecMemcpyNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const 
CUDA_MEMCPY3D_v2 *copyParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphExecMemsetNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS_v1 *memsetParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphExecHostNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecChildGraphNodeSetParams_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphExecEventRecordNodeSetEvent_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphExecEventWaitNodeSetEvent_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams_v11020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams_v11020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphUpload_v11010_ptsz)(CUgraphExec hGraphExec, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphLaunch_v10000_ptsz)(CUgraphExec hGraphExec, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphExecDestroy_v10000)(CUgraphExec hGraphExec); +typedef CUresult (CUDAAPI *PFN_cuGraphDestroy_v10000)(CUgraph hGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphExecUpdate_v10020)(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out); +typedef CUresult (CUDAAPI *PFN_cuGraphExecUpdate_v12000)(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphExecUpdateResultInfo *resultInfo); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeCopyAttributes_v11000)(CUgraphNode dst, CUgraphNode src); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetAttribute_v11000)(CUgraphNode hNode, CUkernelNodeAttrID attr, CUkernelNodeAttrValue_v1 *value_out); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetAttribute_v11000)(CUgraphNode hNode, CUkernelNodeAttrID attr, const CUkernelNodeAttrValue_v1 *value); +typedef CUresult (CUDAAPI *PFN_cuGraphDebugDotPrint_v11030)(CUgraph hGraph, const char *path, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemAllocNode_v11040)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphMemAllocNodeGetParams_v11040)(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS *params_out); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemFreeNode_v11040)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUdeviceptr dptr); +typedef CUresult (CUDAAPI *PFN_cuGraphMemFreeNodeGetParams_v11040)(CUgraphNode hNode, CUdeviceptr *dptr_out); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeSetEnabled_v11060)(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int isEnabled); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetEnabled_v11060)(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int *isEnabled); +typedef CUresult (CUDAAPI *PFN_cuDeviceGraphMemTrim_v11040)(CUdevice device); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetGraphMemAttribute_v11040)(CUdevice device, CUgraphMem_attribute attr, void* value); +typedef CUresult (CUDAAPI *PFN_cuDeviceSetGraphMemAttribute_v11040)(CUdevice device, CUgraphMem_attribute 
attr, void* value); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor_v6050)(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000)(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialBlockSize_v6050)(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialBlockSizeWithFlags_v7000)(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuOccupancyAvailableDynamicSMemPerBlock_v10020)(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialClusterSize_v11070)(int *clusterSize, CUfunction func, const CUlaunchConfig *config); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveClusters_v11070)(int *numClusters, CUfunction func, const CUlaunchConfig *config); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetArray_v2000)(CUtexref hTexRef, CUarray hArray, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmappedArray_v5000)(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress_v3020)(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr_v2 dptr, size_t bytes); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v4010)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v2 *desc, CUdeviceptr_v2 dptr, size_t Pitch); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetFormat_v2000)(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddressMode_v2000)(CUtexref hTexRef, int dim, CUaddress_mode am); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetFilterMode_v2000)(CUtexref hTexRef, CUfilter_mode fm); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapFilterMode_v5000)(CUtexref hTexRef, CUfilter_mode fm); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapLevelBias_v5000)(CUtexref hTexRef, float bias); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapLevelClamp_v5000)(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMaxAnisotropy_v5000)(CUtexref hTexRef, unsigned int maxAniso); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetBorderColor_v8000)(CUtexref hTexRef, float *pBorderColor); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetFlags_v2000)(CUtexref hTexRef, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddress_v3020)(CUdeviceptr_v2 *pdptr, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetArray_v2000)(CUarray *phArray, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmappedArray_v5000)(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddressMode_v2000)(CUaddress_mode *pam, CUtexref hTexRef, int dim); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetFilterMode_v2000)(CUfilter_mode *pfm, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetFormat_v2000)(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapFilterMode_v5000)(CUfilter_mode *pfm, CUtexref hTexRef); +typedef CUresult (CUDAAPI 
*PFN_cuTexRefGetMipmapLevelBias_v5000)(float *pbias, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapLevelClamp_v5000)(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMaxAnisotropy_v5000)(int *pmaxAniso, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetBorderColor_v8000)(float *pBorderColor, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetFlags_v2000)(unsigned int *pFlags, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefCreate_v2000)(CUtexref *pTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefDestroy_v2000)(CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuSurfRefSetArray_v3000)(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuSurfRefGetArray_v3000)(CUarray *phArray, CUsurfref hSurfRef); +typedef CUresult (CUDAAPI *PFN_cuTexObjectCreate_v5000)(CUtexObject_v1 *pTexObject, const CUDA_RESOURCE_DESC_v1 *pResDesc, const CUDA_TEXTURE_DESC_v1 *pTexDesc, const CUDA_RESOURCE_VIEW_DESC_v1 *pResViewDesc); +typedef CUresult (CUDAAPI *PFN_cuTexObjectDestroy_v5000)(CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuTexObjectGetResourceDesc_v5000)(CUDA_RESOURCE_DESC_v1 *pResDesc, CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuTexObjectGetTextureDesc_v5000)(CUDA_TEXTURE_DESC_v1 *pTexDesc, CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuTexObjectGetResourceViewDesc_v5000)(CUDA_RESOURCE_VIEW_DESC_v1 *pResViewDesc, CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuSurfObjectCreate_v5000)(CUsurfObject_v1 *pSurfObject, const CUDA_RESOURCE_DESC_v1 *pResDesc); +typedef CUresult (CUDAAPI *PFN_cuSurfObjectDestroy_v5000)(CUsurfObject_v1 surfObject); +typedef CUresult (CUDAAPI *PFN_cuSurfObjectGetResourceDesc_v5000)(CUDA_RESOURCE_DESC_v1 *pResDesc, CUsurfObject_v1 surfObject); +typedef CUresult (CUDAAPI *PFN_cuTensorMapEncodeTiled_v12000)(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const cuuint32_t *boxDim, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); +typedef CUresult (CUDAAPI *PFN_cuTensorMapEncodeIm2col_v12000)(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const int *pixelBoxLowerCorner, const int *pixelBoxUpperCorner, cuuint32_t channelsPerPixel, cuuint32_t pixelsPerColumn, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); +typedef CUresult (CUDAAPI *PFN_cuTensorMapReplaceAddress_v12000)(CUtensorMap *tensorMap, void *globalAddress); +typedef CUresult (CUDAAPI *PFN_cuDeviceCanAccessPeer_v4000)(int *canAccessPeer, CUdevice_v1 dev, CUdevice_v1 peerDev); +typedef CUresult (CUDAAPI *PFN_cuCtxEnablePeerAccess_v4000)(CUcontext peerContext, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuCtxDisablePeerAccess_v4000)(CUcontext peerContext); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetP2PAttribute_v8000)(int *value, CUdevice_P2PAttribute attrib, CUdevice_v1 srcDevice, CUdevice_v1 dstDevice); +typedef CUresult (CUDAAPI *PFN_cuGraphicsUnregisterResource_v3000)(CUgraphicsResource resource); +typedef CUresult (CUDAAPI 
*PFN_cuGraphicsSubResourceGetMappedArray_v3000)(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedMipmappedArray_v5000)(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedPointer_v3020)(CUdeviceptr_v2 *pDevPtr, size_t *pSize, CUgraphicsResource resource); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceSetMapFlags_v6050)(CUgraphicsResource resource, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphicsMapResources_v7000_ptsz)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphicsUnmapResources_v7000_ptsz)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGetExportTable_v3000)(const void **ppExportTable, const CUuuid *pExportTableId); +typedef CUresult (CUDAAPI *PFN_cuFuncGetModule_v11000)(CUmodule *hmod, CUfunction hfunc); +typedef CUresult (CUDAAPI *PFN_cuGetProcAddress_v11030)(const char *symbol, void **pfn, int driverVersion, cuuint64_t flags); +typedef CUresult (CUDAAPI *PFN_cuGetProcAddress_v12000)(const char *symbol, void **pfn, int driverVersion, cuuint64_t flags, CUdriverProcAddressQueryResult *symbolFound); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v3020)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v3020)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v3020)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v3020)(CUarray dstArray, size_t dstOffset, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v3020)(CUdeviceptr_v2 dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v3020)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v3020)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v3020)(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v3020)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v3020)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v3020)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v3020)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v3020)(const CUDA_MEMCPY3D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v3020)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v3020)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v3020)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v3020)(const CUDA_MEMCPY2D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v3020)(const CUDA_MEMCPY3D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI 
*PFN_cuMemsetD8_v3020)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v3020)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v3020)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemcpy_v4000)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAsync_v4000)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeer_v4000)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeerAsync_v4000)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeer_v4000)(const CUDA_MEMCPY3D_PEER_v1 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeerAsync_v4000)(const CUDA_MEMCPY3D_PEER_v1 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamGetPriority_v5050)(CUstream hStream, int *priority); +typedef CUresult (CUDAAPI *PFN_cuStreamGetFlags_v5050)(CUstream hStream, unsigned int *flags); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCtx_v9020)(CUstream hStream, CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitEvent_v3020)(CUstream hStream, CUevent hEvent, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAddCallback_v5000)(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAttachMemAsync_v6000)(CUstream hStream, CUdeviceptr_v2 dptr, size_t length, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamQuery_v2000)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamSynchronize_v2000)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuEventRecord_v2000)(CUevent hEvent, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuEventRecordWithFlags_v11010)(CUevent hEvent, CUstream hStream, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernel_v4000)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned 
int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernelEx_v11060)(const CUlaunchConfig *config, CUfunction f, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchHostFunc_v10000)(CUstream hStream, CUhostFn fn, void *userData); +typedef CUresult (CUDAAPI *PFN_cuGraphicsMapResources_v3000)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphicsUnmapResources_v3000)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v8000)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v8000)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v9000)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v9000)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v8000)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v11070)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuMemPrefetchAsync_v8000)(CUdeviceptr_v2 devPtr, size_t count, CUdevice_v1 dstDevice, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernel_v9000)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); +typedef CUresult (CUDAAPI *PFN_cuSignalExternalSemaphoresAsync_v10000)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuWaitExternalSemaphoresAsync_v10000)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10010)(CUstream hStream, CUstreamCaptureMode mode); +typedef CUresult (CUDAAPI *PFN_cuStreamEndCapture_v10000)(CUstream hStream, CUgraph *phGraph); +typedef CUresult (CUDAAPI *PFN_cuStreamIsCapturing_v10000)(CUstream hStream, CUstreamCaptureStatus *captureStatus); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v10010)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v11030)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph 
*graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); +typedef CUresult (CUDAAPI *PFN_cuStreamUpdateCaptureDependencies_v11030)(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithParams_v12000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); +typedef CUresult (CUDAAPI *PFN_cuGraphUpload_v11010)(CUgraphExec hGraph, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphLaunch_v10000)(CUgraphExec hGraph, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamCopyAttributes_v11000)(CUstream dstStream, CUstream srcStream); +typedef CUresult (CUDAAPI *PFN_cuStreamGetAttribute_v11000)(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue_v1 *value); +typedef CUresult (CUDAAPI *PFN_cuStreamSetAttribute_v11000)(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue_v1 *param); +typedef CUresult (CUDAAPI *PFN_cuMemMapArrayAsync_v11010)(CUarrayMapInfo_v1 *mapInfoList, unsigned int count, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemFreeAsync_v11020)(CUdeviceptr_v2 dptr, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAllocAsync_v11020)(CUdeviceptr_v2 *dptr, size_t bytesize, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAllocFromPoolAsync_v11020)(CUdeviceptr_v2 *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuFlushGPUDirectRDMAWrites_v11030)(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope); +typedef CUresult (CUDAAPI *PFN_cuUserObjectCreate_v11030)(CUuserObject *object_out, void *ptr, CUhostFn destroy, unsigned int initialRefcount, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuUserObjectRetain_v11030)(CUuserObject object, unsigned int count); +typedef CUresult (CUDAAPI *PFN_cuUserObjectRelease_v11030)(CUuserObject object, unsigned int count); +typedef CUresult (CUDAAPI *PFN_cuGraphRetainUserObject_v11030)(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphReleaseUserObject_v11030)(CUgraph graph, CUuserObject object, unsigned int count); +typedef CUresult (CUDAAPI *PFN_cuModuleGetLoadingMode_v11070)(CUmoduleLoadingMode *mode); +typedef CUresult (CUDAAPI *PFN_cuMemGetHandleForAddressRange_v11070)(void *handle, CUdeviceptr dptr, size_t size, CUmemRangeHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuLibraryLoadData_v12000)(CUlibrary *library, const void *code, CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, CUlibraryOption *libraryOptions, void** libraryOptionValues, unsigned int numLibraryOptions); +typedef CUresult (CUDAAPI *PFN_cuLibraryLoadFromFile_v12000)(CUlibrary *library, const char *fileName, CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, CUlibraryOption *libraryOptions, void **libraryOptionValues, unsigned int numLibraryOptions); +typedef CUresult (CUDAAPI *PFN_cuLibraryUnload_v12000)(CUlibrary library); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetKernel_v12000)(CUkernel *pKernel, CUlibrary library, const char *name); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetModule_v12000)(CUmodule *pMod, CUlibrary library); +typedef CUresult (CUDAAPI *PFN_cuKernelGetFunction_v12000)(CUfunction *pFunc, CUkernel kernel); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetGlobal_v12000)(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); 
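The PFN_* typedefs above exist so that an application can resolve CUDA driver entry points at run time with cuGetProcAddress and store them in correctly typed function pointers. A minimal host-side sketch of that pattern follows, assuming a CUDA 12.x toolkit where cudaTypedefs.h and the five-argument cuGetProcAddress shown above are available; the choice of cuMemAllocAsync and the 12000 version argument are illustrative only and not part of this header.

#include <stdio.h>
#include <cuda.h>
#include <cudaTypedefs.h>

static PFN_cuMemAllocAsync_v11020 pfnMemAllocAsync = NULL;

/* Resolve cuMemAllocAsync against the installed driver; returns 0 on success. */
static int resolve_mem_alloc_async(void)
{
    CUdriverProcAddressQueryResult found;
    CUresult rc = cuGetProcAddress("cuMemAllocAsync",
                                   (void **)&pfnMemAllocAsync,
                                   12000,                         /* requested driver API version (assumed) */
                                   CU_GET_PROC_ADDRESS_DEFAULT,
                                   &found);
    if (rc != CUDA_SUCCESS || found != CU_GET_PROC_ADDRESS_SUCCESS) {
        fprintf(stderr, "cuMemAllocAsync is not available in this driver\n");
        return -1;
    }
    return 0;  /* pfnMemAllocAsync can now be called like the driver function */
}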
+typedef CUresult (CUDAAPI *PFN_cuLibraryGetManaged_v12000)(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); +typedef CUresult (CUDAAPI *PFN_cuKernelGetAttribute_v12000)(int *pi, CUfunction_attribute attrib, CUkernel kernel, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuKernelSetAttribute_v12000)(CUfunction_attribute attrib, int val, CUkernel kernel, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuKernelSetCacheConfig_v12000)(CUkernel kernel, CUfunc_cache config, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetUnifiedFunction_v12000)(void **fptr, CUlibrary library, const char *symbol); +typedef CUresult(CUDAAPI *PFN_cuCoredumpGetAttribute_v12010)(CUcoredumpSettings get, void *value, size_t *size); +typedef CUresult(CUDAAPI *PFN_cuCoredumpGetAttributeGlobal_v12010)(CUcoredumpSettings get, void *value, size_t *size); +typedef CUresult(CUDAAPI *PFN_cuCoredumpSetAttribute_v12010)(CUcoredumpSettings set, void *value, size_t *size); +typedef CUresult(CUDAAPI *PFN_cuCoredumpSetAttributeGlobal_v12010)(CUcoredumpSettings set, void *value, size_t *size); +/* + * Type definitions for older versioned functions in cuda.h + */ +#if defined(__CUDA_API_VERSION_INTERNAL) + typedef CUresult (CUDAAPI *PFN_cuMemHostRegister_v4000)(void *p, size_t bytesize, unsigned int Flags); + typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceSetMapFlags_v3000)(CUgraphicsResource resource, unsigned int flags); + typedef CUresult (CUDAAPI *PFN_cuLinkCreate_v5050)(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); + typedef CUresult (CUDAAPI *PFN_cuLinkAddData_v5050)(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, unsigned int numOptions, CUjit_option *options, void **optionValues); + typedef CUresult (CUDAAPI *PFN_cuLinkAddFile_v5050)(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues); + typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v3020)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v2 *desc, CUdeviceptr_v2 dptr, size_t Pitch); + typedef CUresult (CUDAAPI *PFN_cuDeviceTotalMem_v2000)(unsigned int *bytes, CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v2000)(CUcontext *pctx, unsigned int flags, CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuModuleGetGlobal_v2000)(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name); + typedef CUresult (CUDAAPI *PFN_cuMemGetInfo_v2000)(unsigned int *free, unsigned int *total); + typedef CUresult (CUDAAPI *PFN_cuMemAlloc_v2000)(CUdeviceptr_v1 *dptr, unsigned int bytesize); + typedef CUresult (CUDAAPI *PFN_cuMemAllocPitch_v2000)(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes); + typedef CUresult (CUDAAPI *PFN_cuMemFree_v2000)(CUdeviceptr_v1 dptr); + typedef CUresult (CUDAAPI *PFN_cuMemGetAddressRange_v2000)(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr); + typedef CUresult (CUDAAPI *PFN_cuMemAllocHost_v2000)(void **pp, unsigned int bytesize); + typedef CUresult (CUDAAPI *PFN_cuMemHostGetDevicePointer_v2020)(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v2000)(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v2000)(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v2000)(CUdeviceptr_v1 
dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v2000)(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v2000)(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v2000)(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v2000)(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v2000)(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v2000)(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v2000)(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v2000)(const CUDA_MEMCPY2D_v1 *pCopy); + typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v2000)(const CUDA_MEMCPY2D_v1 *pCopy); + typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v2000)(const CUDA_MEMCPY3D_v1 *pCopy); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v2000)(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v2000)(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v3000)(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v2000)(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v2000)(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v2000)(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N); + typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v2000)(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N); + typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v2000)(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N); + typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height); + typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height); + typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height); + typedef CUresult (CUDAAPI *PFN_cuArrayCreate_v2000)(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray); + typedef CUresult (CUDAAPI *PFN_cuArrayGetDescriptor_v2000)(CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + typedef CUresult (CUDAAPI *PFN_cuArray3DCreate_v2000)(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray); + typedef CUresult (CUDAAPI *PFN_cuArray3DGetDescriptor_v2000)(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress_v2000)(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes); + typedef CUresult (CUDAAPI 
*PFN_cuTexRefSetAddress2D_v2020)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch); + typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddress_v2000)(CUdeviceptr_v1 *pdptr, CUtexref hTexRef); + typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedPointer_v3000)(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource); + typedef CUresult (CUDAAPI *PFN_cuCtxDestroy_v2000)(CUcontext ctx); + typedef CUresult (CUDAAPI *PFN_cuCtxPopCurrent_v2000)(CUcontext *pctx); + typedef CUresult (CUDAAPI *PFN_cuCtxPushCurrent_v2000)(CUcontext ctx); + typedef CUresult (CUDAAPI *PFN_cuStreamDestroy_v2000)(CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuEventDestroy_v2000)(CUevent hEvent); + typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRelease_v7000)(CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxReset_v7000)(CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxSetFlags_v7000)(CUdevice_v1 dev, unsigned int flags); + typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10000)(CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10000_ptsz)(CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuIpcOpenMemHandle_v4010)(CUdeviceptr_v2 *pdptr, CUipcMemHandle_v1 handle, unsigned int Flags); + typedef CUresult (CUDAAPI *PFN_cuGraphInstantiate_v10000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + typedef CUresult (CUDAAPI *PFN_cuGraphInstantiate_v11000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); +#endif + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h new file mode 100644 index 0000000000000000000000000000000000000000..97de57ae494d62ae176fc02ad3c0c3f4d43e1526 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h @@ -0,0 +1,282 @@ +/* + * Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAVDPAU_H +#define CUDAVDPAU_H + +#ifdef CUDA_FORCE_API_VERSION +#error "CUDA_FORCE_API_VERSION is no longer supported." +#endif + +#define cuVDPAUCtxCreate cuVDPAUCtxCreate_v2 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \defgroup CUDA_VDPAU VDPAU Interoperability + * \ingroup CUDA_DRIVER + * + * ___MANBRIEF___ VDPAU interoperability functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the VDPAU interoperability functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Gets the CUDA device associated with a VDPAU device + * + * Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if + * applicable. + * + * \param pDevice - Device associated with vdpDevice + * \param vdpDevice - A VdpDevice handle + * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface, + * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource, + * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + * ::cudaVDPAUGetDevice + */ +CUresult CUDAAPI cuVDPAUGetDevice(CUdevice *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); + +/** + * \brief Create a CUDA context for interoperability with VDPAU + * + * Creates a new CUDA context, initializes VDPAU interoperability, and + * associates the CUDA context with the calling thread. It must be called + * before performing any other VDPAU interoperability operations. It may fail + * if the needed VDPAU driver facilities are not available. For usage of the + * \p flags parameter, see ::cuCtxCreate(). 
+ * + * \param pCtx - Returned CUDA context + * \param flags - Options for CUDA context creation + * \param device - Device on which to create the context + * \param vdpDevice - The VdpDevice to interop with + * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface, + * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource, + * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + * ::cuVDPAUGetDevice + */ +CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); + +/** + * \brief Registers a VDPAU VdpVideoSurface object + * + * Registers the VdpVideoSurface specified by \p vdpSurface for access by + * CUDA. A handle to the registered object is returned as \p pCudaResource. + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * The VdpVideoSurface is presented as an array of subresources that may be + * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray. + * The exact number of valid \p arrayIndex values depends on the VDPAU surface + * format. The mapping is shown in the table below. \p mipLevel must be 0. + * + * \htmlonly + * + * + * + * + * + * + * + * + * + * + *
+ * <tr><th>VdpChromaType</th><th>arrayIndex</th><th>Size</th><th>Format</th><th>Content</th></tr>
+ * <tr><td>VDP_CHROMA_TYPE_420</td><td>0</td><td>w x h/2</td><td>R8</td><td>Top-field luma</td></tr>
+ * <tr><td></td><td>1</td><td>w x h/2</td><td>R8</td><td>Bottom-field luma</td></tr>
+ * <tr><td></td><td>2</td><td>w/2 x h/4</td><td>R8G8</td><td>Top-field chroma</td></tr>
+ * <tr><td></td><td>3</td><td>w/2 x h/4</td><td>R8G8</td><td>Bottom-field chroma</td></tr>
+ * <tr><td>VDP_CHROMA_TYPE_422</td><td>0</td><td>w x h/2</td><td>R8</td><td>Top-field luma</td></tr>
+ * <tr><td></td><td>1</td><td>w x h/2</td><td>R8</td><td>Bottom-field luma</td></tr>
+ * <tr><td></td><td>2</td><td>w/2 x h/2</td><td>R8G8</td><td>Top-field chroma</td></tr>
+ * <tr><td></td><td>3</td><td>w/2 x h/2</td><td>R8G8</td><td>Bottom-field chroma</td></tr>
+ * \endhtmlonly + * + * \latexonly + * \begin{tabular}{|l|l|l|l|l|} + * \hline + * VdpChromaType & arrayIndex & Size & Format & Content \\ + * \hline + * VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\ + * & 1 & w x h/2 & R8 & Bottom-field luma \\ + * & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\ + * & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\ + * \hline + * VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\ + * & 1 & w x h/2 & R8 & Bottom-field luma \\ + * & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\ + * & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\ + * \hline + * \end{tabular} + * \endlatexonly + * + * \param pCudaResource - Pointer to the returned object handle + * \param vdpSurface - The VdpVideoSurface to be registered + * \param flags - Map flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * \notefnerr + * + * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, + * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource, + * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + * ::cuVDPAUGetDevice, + * ::cudaGraphicsVDPAURegisterVideoSurface + */ +CUresult CUDAAPI cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags); + +/** + * \brief Registers a VDPAU VdpOutputSurface object + * + * Registers the VdpOutputSurface specified by \p vdpSurface for access by + * CUDA. A handle to the registered object is returned as \p pCudaResource. + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * The VdpOutputSurface is presented as an array of subresources that may be + * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray. + * The exact number of valid \p arrayIndex values depends on the VDPAU surface + * format. The mapping is shown in the table below. \p mipLevel must be 0. + * + * \htmlonly + * + * + * + * + *
+ * <tr><th>VdpRGBAFormat</th><th>arrayIndex</th><th>Size</th><th>Format</th><th>Content</th></tr>
+ * <tr><td>VDP_RGBA_FORMAT_B8G8R8A8</td><td>0</td><td>w x h</td><td>ARGB8</td><td>Entire surface</td></tr>
+ * <tr><td>VDP_RGBA_FORMAT_R10G10B10A2</td><td>0</td><td>w x h</td><td>A2BGR10</td><td>Entire surface</td></tr>
+ * \endhtmlonly + * + * \latexonly + * \begin{tabular}{|l|l|l|l|l|} + * \hline + * VdpRGBAFormat & arrayIndex & Size & Format & Content \\ + * \hline + * VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\ + * VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\ + * \hline + * \end{tabular} + * \endlatexonly + * + * \param pCudaResource - Pointer to the returned object handle + * \param vdpSurface - The VdpOutputSurface to be registered + * \param flags - Map flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * \notefnerr + * + * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, + * ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource, + * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + * ::cuVDPAUGetDevice, + * ::cudaGraphicsVDPAURegisterOutputSurface + */ +CUresult CUDAAPI cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags); + +/** @} */ /* END CUDA_VDPAU */ + + +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cuVDPAUCtxCreate + + CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +#endif /* __CUDA_API_VERSION_INTERNAL */ + +#ifdef __cplusplus +}; +#endif + +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..2bfd148632827d222548be49b3a2ffb7caa1c4dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h @@ -0,0 +1,90 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAVDPAUTYPEDEFS_H +#define CUDAVDPAUTYPEDEFS_H + +// Dependent includes for cudavdpau.h +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaVDPAU.h + */ +#define PFN_cuVDPAUGetDevice PFN_cuVDPAUGetDevice_v3010 +#define PFN_cuVDPAUCtxCreate PFN_cuVDPAUCtxCreate_v3020 +#define PFN_cuGraphicsVDPAURegisterVideoSurface PFN_cuGraphicsVDPAURegisterVideoSurface_v3010 +#define PFN_cuGraphicsVDPAURegisterOutputSurface PFN_cuGraphicsVDPAURegisterOutputSurface_v3010 + + +/** + * Type definitions for functions defined in cudaVDPAU.h + */ +typedef CUresult (CUDAAPI *PFN_cuVDPAUGetDevice_v3010)(CUdevice_v1 *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3020)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterVideoSurface_v3010)(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterOutputSurface_v3010)(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags); + +/* + * Type definitions for older versioned functions in cudaVDPAU.h + */ +#if defined(__CUDA_API_VERSION_INTERNAL) +typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3010)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +#endif + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h new file mode 100644 index 0000000000000000000000000000000000000000..12fd878dd10d9f18ad944a0d62ae1caba123fd06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h @@ -0,0 +1,280 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. 
and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDA_AWBARRIER_H_ +# define _CUDA_AWBARRIER_H_ + +# include "cuda_awbarrier_primitives.h" + +# if !defined(_CUDA_AWBARRIER_SM_TARGET) +# error This file requires compute capability 7.0 or greater. +# endif + +# if !defined(_CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER) +# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \ + -std=c++11 compiler option. 
+# endif + +_CUDA_AWBARRIER_BEGIN_NAMESPACE + +class awbarrier { +public: + class arrival_token { + public: + arrival_token() = default; + ~arrival_token() = default; + _CUDA_AWBARRIER_QUALIFIER uint32_t pending_count() const; + private: + _CUDA_AWBARRIER_QUALIFIER arrival_token(uint64_t token); + uint64_t token; + friend awbarrier; + }; + awbarrier() = default; + awbarrier(const awbarrier&) = delete; + awbarrier& operator=(const awbarrier&) = delete; + ~awbarrier() = default; + + _CUDA_AWBARRIER_QUALIFIER arrival_token arrive(); + _CUDA_AWBARRIER_QUALIFIER arrival_token arrive_and_drop(); + _CUDA_AWBARRIER_QUALIFIER bool timed_wait(arrival_token token, uint32_t hint_cycles); + _CUDA_AWBARRIER_QUALIFIER bool timed_wait_parity(bool phase, uint32_t hint_cycles); + _CUDA_AWBARRIER_QUALIFIER void wait(arrival_token token); + _CUDA_AWBARRIER_QUALIFIER void arrive_and_wait(); + _CUDA_AWBARRIER_QUALIFIER bool try_wait(arrival_token token, uint32_t maxSleepNanosec); + _CUDA_AWBARRIER_QUALIFIER bool try_wait_parity(bool phase, uint32_t maxSleepNanosec); + _CUDA_AWBARRIER_STATIC_QUALIFIER __host__ constexpr uint32_t max(); + +private: + uint64_t barrier; + friend _CUDA_AWBARRIER_QUALIFIER void init(awbarrier* barrier, uint32_t expected_count); + friend _CUDA_AWBARRIER_QUALIFIER void inval(awbarrier* barrier); + friend class pipeline; +}; + +_CUDA_AWBARRIER_QUALIFIER +uint32_t awbarrier::arrival_token::pending_count() const +{ + const uint32_t pending_count = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(this->token); +#if (__CUDA_ARCH__ >= 900) + return pending_count; +#else + return (pending_count >> 15); +#endif +} + +_CUDA_AWBARRIER_QUALIFIER +awbarrier::arrival_token::arrival_token(uint64_t token) + : token(token) +{ +} + +_CUDA_AWBARRIER_QUALIFIER +void init(awbarrier* barrier, uint32_t expected_count) +{ + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count <= _CUDA_AWBARRIER_MAX_COUNT); + +#if (__CUDA_ARCH__ >= 900) + const uint32_t init_count = expected_count; +#else + const uint32_t init_count = (expected_count << 15) + expected_count; +#endif + + _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(&barrier->barrier, init_count); +} + +_CUDA_AWBARRIER_QUALIFIER +void inval(awbarrier* barrier) +{ + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(&barrier->barrier); +} + +_CUDA_AWBARRIER_QUALIFIER +awbarrier::arrival_token awbarrier::arrive() +{ + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + #if (__CUDA_ARCH__ < 900) + const uint32_t arrive_count = 1 << 15; + const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete(&this->barrier, arrive_count); + (void) +#else + const uint64_t token = + #endif + _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop(&this->barrier); + + return arrival_token(token); +} + +_CUDA_AWBARRIER_QUALIFIER +awbarrier::arrival_token awbarrier::arrive_and_drop() +{ + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + #if (__CUDA_ARCH__ < 900) + const uint32_t arrive_count = 1 << 15; + const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete(&this->barrier, arrive_count); + (void) +#else + const uint64_t token = + #endif + _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop(&this->barrier); + + return arrival_token(token); +} + +_CUDA_AWBARRIER_QUALIFIER +bool awbarrier::timed_wait(arrival_token token, uint32_t hint_cycles) +{ + constexpr uint64_t 
max_busy_wait_cycles = 1024; + constexpr uint32_t max_sleep_ns = 1 << 20; + + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) { + return true; + } + + uint64_t start_cycles = clock64(); + uint64_t elapsed_cycles = 0; + uint32_t sleep_ns = 32; + while (elapsed_cycles < hint_cycles) { + if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) { + return true; + } + + if (elapsed_cycles > max_busy_wait_cycles) { + __nanosleep(sleep_ns); + if (sleep_ns < max_sleep_ns) { + sleep_ns *= 2; + } + } + + elapsed_cycles = clock64() - start_cycles; + } + + return false; +} + +_CUDA_AWBARRIER_QUALIFIER +bool awbarrier::timed_wait_parity(bool phase, uint32_t hint_cycles) +{ + constexpr uint64_t max_busy_wait_cycles = 1024; + constexpr uint32_t max_sleep_ns = 1 << 20; + + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) { + return true; + } + + uint64_t start_cycles = clock64(); + uint64_t elapsed_cycles = 0; + uint32_t sleep_ns = 32; + while (elapsed_cycles < hint_cycles) { + if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) { + return true; + } + + if (elapsed_cycles > max_busy_wait_cycles) { + __nanosleep(sleep_ns); + if (sleep_ns < max_sleep_ns) { + sleep_ns *= 2; + } + } + + elapsed_cycles = clock64() - start_cycles; + } + + return false; +} + +_CUDA_AWBARRIER_QUALIFIER +bool awbarrier::try_wait(arrival_token token, uint32_t maxSleepNanosec) +{ + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(&this->barrier, token.token, maxSleepNanosec); +} + +_CUDA_AWBARRIER_QUALIFIER +bool awbarrier::try_wait_parity(bool phase, uint32_t maxSleepNanosec) +{ + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(&this->barrier, phase, maxSleepNanosec); +} + +_CUDA_AWBARRIER_QUALIFIER +void awbarrier::wait(arrival_token token) +{ + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + while (!timed_wait(token, ~0u)); +} + +_CUDA_AWBARRIER_QUALIFIER +void awbarrier::arrive_and_wait() +{ + _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier)); + + this->wait(this->arrive()); +} + +_CUDA_AWBARRIER_QUALIFIER __host__ +constexpr uint32_t awbarrier::max() +{ + return _CUDA_AWBARRIER_MAX_COUNT; +} + +_CUDA_AWBARRIER_END_NAMESPACE + +#endif /* !_CUDA_AWBARRIER_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..7c58346fe78c59329aca138ebc92add9015c005c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h @@ -0,0 +1,365 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */
+
+#ifndef _CUDA_AWBARRIER_HELPERS_H_
+#define _CUDA_AWBARRIER_HELPERS_H_
+
+#define _CUDA_AWBARRIER_NAMESPACE nvcuda::experimental
+#define _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace nvcuda { namespace experimental {
+#define _CUDA_AWBARRIER_END_NAMESPACE } }
+
+#define _CUDA_AWBARRIER_INTERNAL_NAMESPACE _CUDA_AWBARRIER_NAMESPACE::__awbarrier_internal
+#define _CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace __awbarrier_internal {
+#define _CUDA_AWBARRIER_END_INTERNAL_NAMESPACE } _CUDA_AWBARRIER_END_NAMESPACE
+
+# if !defined(_CUDA_AWBARRIER_QUALIFIER)
+# define _CUDA_AWBARRIER_QUALIFIER inline __device__
+# endif
+# if !defined(_CUDA_AWBARRIER_STATIC_QUALIFIER)
+# define _CUDA_AWBARRIER_STATIC_QUALIFIER static inline __device__
+#endif
+
+#if defined(__CUDA_ARCH__)
+#if (__CUDA_ARCH__ >= 900)
+# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_90
+#elif (__CUDA_ARCH__ >= 800)
+# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_80
+#elif (__CUDA_ARCH__ >= 700)
+# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70
+#endif
+#else
+# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70
+#endif
+
+#define _CUDA_AWBARRIER_MAX_COUNT ((1 << 14) - 1)
+
+#if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900)))
+# define _CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER
+#endif
+
+#if !defined(_CUDA_AWBARRIER_DEBUG)
+# if defined(__CUDACC_DEBUG__)
+# define _CUDA_AWBARRIER_DEBUG 1
+# else
+# define _CUDA_AWBARRIER_DEBUG 0
+# endif
+#endif
+
+#if defined(_CUDA_AWBARRIER_DEBUG) && (_CUDA_AWBARRIER_DEBUG == 1) && !defined(NDEBUG)
+# if !defined(__CUDACC_RTC__)
+# include <cassert>
+# endif
+# define _CUDA_AWBARRIER_ASSERT(x) assert((x));
+# define _CUDA_AWBARRIER_ABORT() assert(0);
+#else
+# define _CUDA_AWBARRIER_ASSERT(x)
+# define _CUDA_AWBARRIER_ABORT() __trap();
+#endif
+
+#if defined(__CUDACC_RTC__)
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef uint64_t uintptr_t;
+#else
+# include <stdint.h>
+#endif
+
+// implicitly provided by NVRTC
+#ifndef __CUDACC_RTC__
+#include <nv/target>
+#endif /* !defined(__CUDACC_RTC__) */
+
+typedef uint64_t __mbarrier_t;
+typedef uint64_t __mbarrier_token_t;
+
+_CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE
+
+extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *);
+
+union AWBarrier {
+    struct {
+        uint32_t expected;
+        uint32_t pending;
+    } split;
+    uint64_t raw;
+};
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+void awbarrier_init(uint64_t* barrier, uint32_t expected_count) {
+    _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
+    _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count < (1 << 29));
+
+    NV_IF_TARGET(NV_PROVIDES_SM_80,
+        asm volatile ("mbarrier.init.shared.b64 [%0], %1;"
+            :
+            : "r"(__nvvm_get_smem_pointer(barrier)), "r"(expected_count)
+            : "memory");
+        return;
+    )
+    NV_IF_TARGET(NV_PROVIDES_SM_70,
+        AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
+
+        awbarrier->split.expected = 0x40000000 - expected_count;
+        awbarrier->split.pending = 0x80000000 - expected_count;
+        return;
+    )
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+void awbarrier_inval(uint64_t* barrier) {
+    _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
+
+    NV_IF_TARGET(NV_PROVIDES_SM_80,
+        asm volatile ("mbarrier.inval.shared.b64 [%0];"
+            :
+            : "r"(__nvvm_get_smem_pointer(barrier))
+            : "memory");
+        return;
+    )
+    return;
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+uint32_t awbarrier_token_pending_count(uint64_t token) {
+    NV_IF_TARGET(NV_PROVIDES_SM_80,
+        uint32_t __pending_count;
+
+        asm
("mbarrier.pending_count.b64 %0, %1;" + : "=r"(__pending_count) + : "l"(token)); + return __pending_count; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + const uint32_t pending = token >> 32; + return 0x80000000 - (pending & 0x7fffffff); + ) +} + +template +_CUDA_AWBARRIER_STATIC_QUALIFIER +uint64_t awbarrier_arrive_drop(uint64_t* barrier) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint64_t token; + + if (_Drop) { + asm volatile ("mbarrier.arrive_drop.shared.b64 %0, [%1];" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)) + : "memory"); + } else { + asm volatile ("mbarrier.arrive.shared.b64 %0, [%1];" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)) + : "memory"); + } + + return token; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + AWBarrier* awbarrier = reinterpret_cast(barrier); + + while ((*reinterpret_cast(&awbarrier->split.pending) & 0x7fffffff) == 0); + + if (_Drop) { + (void)atomicAdd_block(&awbarrier->split.expected, 1); + } + + __threadfence_block(); + + const uint32_t old_pending = atomicAdd_block(&awbarrier->split.pending, 1); + const uint32_t new_pending = old_pending + 1; + const bool reset = (old_pending ^ new_pending) & 0x80000000; + + if (reset) { + __threadfence_block(); + + uint32_t new_expected = *reinterpret_cast(&awbarrier->split.expected); + new_expected &= ~0x40000000; + if (new_expected & 0x20000000) { + new_expected |= 0x40000000; + } + atomicAdd_block(&awbarrier->split.pending, new_expected); + } + + return static_cast(old_pending) << 32; + ) +} + +template +_CUDA_AWBARRIER_STATIC_QUALIFIER +uint64_t awbarrier_arrive_drop_no_complete(uint64_t* barrier, uint32_t count) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + _CUDA_AWBARRIER_ASSERT(count > 0 && count < (1 << 29)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint64_t token; + + if (_Drop) { + asm volatile ("mbarrier.arrive_drop.noComplete.shared.b64 %0, [%1], %2;" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count) + : "memory"); + } else { + asm volatile ("mbarrier.arrive.noComplete.shared.b64 %0, [%1], %2;" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count) + : "memory"); + } + + return token; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + AWBarrier* awbarrier = reinterpret_cast(barrier); + + while ((*reinterpret_cast(&awbarrier->split.pending) & 0x7fffffff) == 0); + + if (_Drop) { + (void)atomicAdd_block(&awbarrier->split.expected, count); + } + + return static_cast(atomicAdd_block(&awbarrier->split.pending, count)) << 32; + ) +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_test_wait(uint64_t* barrier, uint64_t token) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint32_t __wait_complete; + + asm volatile ("{" + " .reg .pred %%p;" + " mbarrier.test_wait.shared.b64 %%p, [%1], %2;" + " selp.b32 %0, 1, 0, %%p;" + "}" + : "=r"(__wait_complete) + : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token) + : "memory"); + return bool(__wait_complete); + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + volatile AWBarrier* awbarrier = reinterpret_cast(barrier); + + return ((token >> 32) ^ awbarrier->split.pending) & 0x80000000; + ) +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_test_wait_parity(uint64_t* barrier, bool phase_parity) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_90, + uint32_t __wait_complete = 0; + + asm volatile ("{" + ".reg .pred %%p;" + "mbarrier.test_wait.parity.shared.b64 %%p, [%1], %2;" + "selp.b32 %0, 1, 0, %%p;" + "}" + : "=r"(__wait_complete) + 
: "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast(phase_parity)) + : "memory"); + + return __wait_complete; + ) + _CUDA_AWBARRIER_ABORT() + return false; +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_try_wait(uint64_t* barrier, uint64_t token, uint32_t max_sleep_nanosec) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_90, + uint32_t __wait_complete = 0; + + asm volatile ("{\n\t" + ".reg .pred p;\n\t" + "mbarrier.try_wait.shared.b64 p, [%1], %2, %3;\n\t" + "selp.b32 %0, 1, 0, p;\n\t" + "}" + : "=r"(__wait_complete) + : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token), "r"(max_sleep_nanosec) + : "memory"); + + return __wait_complete; + ) + _CUDA_AWBARRIER_ABORT() + return false; +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_try_wait_parity(uint64_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_90, + uint32_t __wait_complete = 0; + + asm volatile ("{\n\t" + ".reg .pred p;\n\t" + "mbarrier.try_wait.parity.shared.b64 p, [%1], %2, %3;\n\t" + "selp.b32 %0, 1, 0, p;\n\t" + "}" + : "=r"(__wait_complete) + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast(phase_parity)), "r"(max_sleep_nanosec) + : "memory"); + + return __wait_complete; + ) + _CUDA_AWBARRIER_ABORT() + return false; +} + +_CUDA_AWBARRIER_END_INTERNAL_NAMESPACE + +#endif /* !_CUDA_AWBARRIER_HELPERS_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h new file mode 100644 index 0000000000000000000000000000000000000000..5562ef3f6afeb7fce4bad4cb8067b3cb1b9a690f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h @@ -0,0 +1,109 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _CUDA_AWBARRIER_PRIMITIVES_H_
+#define _CUDA_AWBARRIER_PRIMITIVES_H_
+
+#include "cuda_awbarrier_helpers.h"
+
+#if !defined(_CUDA_AWBARRIER_SM_TARGET)
+# error This file requires compute capability 7.0 or greater.
+#endif
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER __host__
+uint32_t __mbarrier_maximum_count() {
+    return _CUDA_AWBARRIER_MAX_COUNT;
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+void __mbarrier_init(__mbarrier_t* barrier, uint32_t expected_count) {
+    _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(barrier, expected_count);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+void __mbarrier_inval(__mbarrier_t* barrier) {
+    _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(barrier);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+__mbarrier_token_t __mbarrier_arrive(__mbarrier_t* barrier) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(barrier);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+__mbarrier_token_t __mbarrier_arrive_and_drop(__mbarrier_t* barrier) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(barrier);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_test_wait(__mbarrier_t* barrier, __mbarrier_token_t token) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(barrier, token);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+uint32_t __mbarrier_token_pending_count(__mbarrier_token_t token) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(token);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_test_wait_parity(__mbarrier_t* barrier, bool phase_parity) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(barrier, phase_parity);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_try_wait(__mbarrier_t* barrier, __mbarrier_token_t token, uint32_t max_sleep_nanosec) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(barrier, token, max_sleep_nanosec);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_try_wait_parity(__mbarrier_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(barrier, phase_parity, max_sleep_nanosec);
+}
+
+#endif /* !_CUDA_AWBARRIER_PRIMITIVES_H_ */ diff --git
a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ec55a7b364a8ba1a2220565d979dbee87dd23f09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp @@ -0,0 +1,2846 @@ +/* +* Copyright 1993-2022 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. +* +* Any use of the Licensed Deliverables in individual and commercial +* software must include, in the user documentation and internal +* comments to the code, the above Disclaimer and U.S. Government End +* Users Notice. +*/ + +#if !defined(__CUDA_BF16_HPP__) +#define __CUDA_BF16_HPP__ + +#if !defined(__CUDA_BF16_H__) +#error "Do not include this file directly. Instead, include cuda_bf16.h." +#endif + +#if !defined(_MSC_VER) && __cplusplus >= 201103L +# define __CPP_VERSION_AT_LEAST_11_BF16 +#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L +# define __CPP_VERSION_AT_LEAST_11_BF16 +#endif + +/* C++11 header for std::move. 
+ * In RTC mode, std::move is provided implicitly; don't include the header
+ */
+#if defined(__CPP_VERSION_AT_LEAST_11_BF16) && !defined(__CUDACC_RTC__)
+#include <utility>
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) && !defined(__CUDACC_RTC__) */
+
+/* C++ header for std::memcpy (used for type punning in host-side implementations).
+ * When compiling as a CUDA source file memcpy is provided implicitly.
+ * !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
+ */
+#if defined(__cplusplus) && !defined(__CUDACC__)
+#include <cstring>
+#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
+
+// implicitly provided by NVRTC
+#if !defined(__CUDACC_RTC__)
+#include <nv/target>
+#endif /* !defined(__CUDACC_RTC__) */
+
+#if !defined(IF_DEVICE_OR_CUDACC)
+#if defined(__CUDACC__)
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, c)
+#else
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, f)
+#endif
+#endif
+
+/* Set up function decorations */
+#if defined(__CUDACC__)
+#define __CUDA_BF16_DECL__ static __device__ __inline__
+#define __CUDA_HOSTDEVICE_BF16_DECL__ static __host__ __device__ __inline__
+#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
+#define __CUDA_HOSTDEVICE__ __host__ __device__
+#else /* !defined(__CUDACC__) */
+#if defined(__GNUC__)
+#define __CUDA_HOSTDEVICE_BF16_DECL__ static __attribute__ ((unused))
+#else
+#define __CUDA_HOSTDEVICE_BF16_DECL__ static
+#endif /* defined(__GNUC__) */
+#define __CUDA_HOSTDEVICE__
+#endif /* defined(__CUDACC_) */
+
+/* Set up structure-alignment attribute */
+#if defined(__CUDACC__)
+#define __CUDA_ALIGN__(align) __align__(align)
+#else
+/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */
+#if defined(__CPP_VERSION_AT_LEAST_11_BF16)
+#define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */
+#else /* defined(__CPP_VERSION_AT_LEAST_11_BF16)*/
+#if defined(__GNUC__)
+#define __CUDA_ALIGN__(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+#define __CUDA_ALIGN__(n) __declspec(align(n))
+#else
+#define __CUDA_ALIGN__(n)
+#endif /* defined(__GNUC__) */
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */
+#endif /* defined(__CUDACC__) */
+
+/* Macros to allow nv_bfloat16 & nv_bfloat162 to be used by inline assembly */
+#define __BFLOAT16_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
+#define __BFLOAT16_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
+#define __BFLOAT162_TO_UI(var) *(reinterpret_cast<unsigned int *>(&(var)))
+#define __BFLOAT162_TO_CUI(var) *(reinterpret_cast<const unsigned int *>(&(var)))
+
+/**
+* Types which allow static initialization of "nv_bfloat16" and "nv_bfloat162" until
+* these become an actual builtin. Note this initialization is as a
+* bitfield representation of "nv_bfloat16", and not a conversion from short->nv_bfloat16.
+* Such a representation will be deprecated in a future version of CUDA.
+* (Note these are visible to non-nvcc compilers, including C-only compilation) +*/ +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __nv_bfloat16_raw; + +typedef struct __CUDA_ALIGN__(4) { + unsigned short x; + unsigned short y; +} __nv_bfloat162_raw; + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/* Hide GCC member initialization list warnings because of host/device in-function init requirement */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic ignored "-Weffc++" +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +/* class' : multiple assignment operators specified + The class has multiple assignment operators of a single type. This warning is informational */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( push ) +#pragma warning( disable:4522 ) +#endif /* defined(__GNUC__) */ + +struct __CUDA_ALIGN__(2) __nv_bfloat16 { +protected: + unsigned short __x; + +public: +#if defined(__CPP_VERSION_AT_LEAST_11_BF16) + __nv_bfloat16() = default; +#else + __CUDA_HOSTDEVICE__ __nv_bfloat16() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */ + + /* Convert to/from __nv_bfloat16_raw */ + __CUDA_HOSTDEVICE__ __nv_bfloat16(const __nv_bfloat16_raw &hr) : __x(hr.x) { } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(const __nv_bfloat16_raw &hr) { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __nv_bfloat16 &operator=(const __nv_bfloat16_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __nv_bfloat16 &operator=(const volatile __nv_bfloat16_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ operator __nv_bfloat16_raw() const { __nv_bfloat16_raw ret; ret.x = __x; return ret; } + __CUDA_HOSTDEVICE__ operator __nv_bfloat16_raw() const volatile { __nv_bfloat16_raw ret; ret.x = __x; return ret; } + +#if !defined(__CUDA_NO_BFLOAT16_CONVERSIONS__) + /* Construct from float/double */ + __CUDA_HOSTDEVICE__ __nv_bfloat16(const float f) { __x = __float2bfloat16(f).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(const double f) { __x = __double2bfloat16(f).__x; } + + __CUDA_HOSTDEVICE__ operator float() const { return __bfloat162float(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(const float f) { __x = __float2bfloat16(f).__x; return *this; } + + /* We omit "cast to double" operator, so as to not be ambiguous about up-cast */ + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(const double f) { __x = __double2bfloat16(f).__x; return *this; } + +/* Member functions only available to nvcc compilation so far */ +#if (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) + /* Allow automatic construction from types supported natively in hardware */ + /* Note we do avoid constructor init-list because of special host/device compilation rules */ + __CUDA_HOSTDEVICE__ __nv_bfloat16(short val) { __x = __short2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(unsigned short val) { __x = __ushort2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(int val) { __x = __int2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(unsigned int val) { __x = __uint2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(long long val) { __x = __ll2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(unsigned long long 
val) { __x = __ull2bfloat16_rn(val).__x; } + + /* Allow automatic casts to supported builtin types, matching all that are permitted with float */ + __CUDA_HOSTDEVICE__ operator short() const { return __bfloat162short_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(short val) { __x = __short2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned short() const { return __bfloat162ushort_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(unsigned short val) { __x = __ushort2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator int() const { return __bfloat162int_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(int val) { __x = __int2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned int() const { return __bfloat162uint_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(unsigned int val) { __x = __uint2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator long long() const { return __bfloat162ll_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(long long val) { __x = __ll2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned long long() const { return __bfloat162ull_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(unsigned long long val) { __x = __ull2bfloat16_rn(val).__x; return *this; } + + /* Boolean conversion - note both 0 and -0 must return false */ + __CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFF) != 0; } +#endif /* (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) */ +#endif /* !defined(__CUDA_NO_BFLOAT16_CONVERSIONS__) */ +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) || defined(_NVHPC_CUDA) + +#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) || defined(_NVHPC_CUDA) +#if !defined(__CUDA_NO_BFLOAT16_OPERATORS__) +/* Some basic arithmetic operations expected of a builtin */ +__device__ __forceinline__ __nv_bfloat16 operator+(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hadd(lh, rh); } +__device__ __forceinline__ __nv_bfloat16 operator-(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hsub(lh, rh); } +__device__ __forceinline__ __nv_bfloat16 operator*(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hmul(lh, rh); } +__device__ __forceinline__ __nv_bfloat16 operator/(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hdiv(lh, rh); } + +__device__ __forceinline__ __nv_bfloat16 &operator+=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hadd(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat16 &operator-=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hsub(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat16 &operator*=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hmul(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat16 &operator/=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hdiv(lh, rh); return lh; } + +/* Note for increment and decrement we use the raw value 0x3F80 equating to nv_bfloat16(1.0f), to avoid the extra conversion */ +__device__ __forceinline__ __nv_bfloat16 &operator++(__nv_bfloat16 &h) { __nv_bfloat16_raw one; one.x = 0x3F80; h += one; return h; } +__device__ __forceinline__ __nv_bfloat16 &operator--(__nv_bfloat16 &h) { __nv_bfloat16_raw one; one.x = 0x3F80; h -= one; return h; } +__device__ __forceinline__ __nv_bfloat16 operator++(__nv_bfloat16 &h, const 
int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __nv_bfloat16 ret = h; + __nv_bfloat16_raw one; + one.x = 0x3F80; + h += one; + return ret; +} +__device__ __forceinline__ __nv_bfloat16 operator--(__nv_bfloat16 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __nv_bfloat16 ret = h; + __nv_bfloat16_raw one; + one.x = 0x3F80; + h -= one; + return ret; +} +/* Unary plus and inverse operators */ +__device__ __forceinline__ __nv_bfloat16 operator+(const __nv_bfloat16 &h) { return h; } +__device__ __forceinline__ __nv_bfloat16 operator-(const __nv_bfloat16 &h) { return __hneg(h); } + +/* Some basic comparison operations to make it look like a builtin */ +__device__ __forceinline__ bool operator==(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __heq(lh, rh); } +__device__ __forceinline__ bool operator!=(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hneu(lh, rh); } +__device__ __forceinline__ bool operator> (const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hgt(lh, rh); } +__device__ __forceinline__ bool operator< (const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hlt(lh, rh); } +__device__ __forceinline__ bool operator>=(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hge(lh, rh); } +__device__ __forceinline__ bool operator<=(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hle(lh, rh); } +#endif /* !defined(__CUDA_NO_BFLOAT16_OPERATORS__) */ +#endif /* (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) || defined(_NVHPC_CUDA) */ +#endif /* defined(__CUDACC__) */ + +/* __nv_bfloat162 is visible to non-nvcc host compilers */ +struct __CUDA_ALIGN__(4) __nv_bfloat162 { + __nv_bfloat16 x; + __nv_bfloat16 y; + + // All construct/copy/assign/move +public: +#if defined(__CPP_VERSION_AT_LEAST_11_BF16) + __nv_bfloat162() = default; + __CUDA_HOSTDEVICE__ __nv_bfloat162(__nv_bfloat162 &&src) { __BFLOAT162_TO_UI(*this) = std::move(__BFLOAT162_TO_CUI(src)); } + __CUDA_HOSTDEVICE__ __nv_bfloat162 &operator=(__nv_bfloat162 &&src) { __BFLOAT162_TO_UI(*this) = std::move(__BFLOAT162_TO_CUI(src)); return *this; } +#else + __CUDA_HOSTDEVICE__ __nv_bfloat162() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */ + __CUDA_HOSTDEVICE__ __nv_bfloat162(const __nv_bfloat16 &a, const __nv_bfloat16 &b) : x(a), y(b) { } + __CUDA_HOSTDEVICE__ __nv_bfloat162(const __nv_bfloat162 &src) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(src); } + __CUDA_HOSTDEVICE__ __nv_bfloat162 &operator=(const __nv_bfloat162 &src) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(src); return *this; } + + /* Convert to/from __nv_bfloat162_raw */ + __CUDA_HOSTDEVICE__ __nv_bfloat162(const __nv_bfloat162_raw &h2r ) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(h2r); } + __CUDA_HOSTDEVICE__ __nv_bfloat162 &operator=(const __nv_bfloat162_raw &h2r) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(h2r); return *this; } + __CUDA_HOSTDEVICE__ operator __nv_bfloat162_raw() const { __nv_bfloat162_raw ret; ret.x = 0U; ret.y = 0U; __BFLOAT162_TO_UI(ret) = __BFLOAT162_TO_CUI(*this); return ret; } +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +#if ((!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) && !defined(__CUDA_NO_BFLOAT162_OPERATORS__)) || defined(_NVHPC_CUDA) + 
+__device__ __forceinline__ __nv_bfloat162 operator+(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hadd2(lh, rh); } +__device__ __forceinline__ __nv_bfloat162 operator-(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hsub2(lh, rh); } +__device__ __forceinline__ __nv_bfloat162 operator*(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hmul2(lh, rh); } +__device__ __forceinline__ __nv_bfloat162 operator/(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __h2div(lh, rh); } + +__device__ __forceinline__ __nv_bfloat162& operator+=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __hadd2(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat162& operator-=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __hsub2(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat162& operator*=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __hmul2(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat162& operator/=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __h2div(lh, rh); return lh; } + +__device__ __forceinline__ __nv_bfloat162 &operator++(__nv_bfloat162 &h) { __nv_bfloat162_raw one; one.x = 0x3F80; one.y = 0x3F80; h = __hadd2(h, one); return h; } +__device__ __forceinline__ __nv_bfloat162 &operator--(__nv_bfloat162 &h) { __nv_bfloat162_raw one; one.x = 0x3F80; one.y = 0x3F80; h = __hsub2(h, one); return h; } +__device__ __forceinline__ __nv_bfloat162 operator++(__nv_bfloat162 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __nv_bfloat162 ret = h; + __nv_bfloat162_raw one; + one.x = 0x3F80; + one.y = 0x3F80; + h = __hadd2(h, one); + return ret; +} +__device__ __forceinline__ __nv_bfloat162 operator--(__nv_bfloat162 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. 
+ static_cast(ignored); + + const __nv_bfloat162 ret = h; + __nv_bfloat162_raw one; + one.x = 0x3F80; + one.y = 0x3F80; + h = __hsub2(h, one); + return ret; +} +__device__ __forceinline__ __nv_bfloat162 operator+(const __nv_bfloat162 &h) { return h; } +__device__ __forceinline__ __nv_bfloat162 operator-(const __nv_bfloat162 &h) { return __hneg2(h); } + +__device__ __forceinline__ bool operator==(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbeq2(lh, rh); } +__device__ __forceinline__ bool operator!=(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbneu2(lh, rh); } +__device__ __forceinline__ bool operator>(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbgt2(lh, rh); } +__device__ __forceinline__ bool operator<(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hblt2(lh, rh); } +__device__ __forceinline__ bool operator>=(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbge2(lh, rh); } +__device__ __forceinline__ bool operator<=(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hble2(lh, rh); } + +#endif /* ((!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) && !defined(__CUDA_NO_BFLOAT162_OPERATORS__)) || defined(_NVHPC_CUDA) */ +#endif /* defined(__CUDACC__) */ + +/* Restore warning for multiple assignment operators */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( pop ) +#endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */ + +/* Restore -Weffc++ warnings from here on */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +#undef __CUDA_HOSTDEVICE__ +#undef __CUDA_ALIGN__ + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __internal_float_as_uint(const float f) +{ + unsigned int u; +IF_DEVICE_OR_CUDACC( + u = __float_as_uint(f); +, + memcpy(&u, &f, sizeof(f)); +, + std::memcpy(&u, &f, sizeof(f)); +) + return u; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ float __internal_uint_as_float(unsigned int u) +{ + float f; +IF_DEVICE_OR_CUDACC( + f = __uint_as_float(u); +, + memcpy(&f, &u, sizeof(u)); +, + std::memcpy(&f, &u, sizeof(u)); +) + return f; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned short __internal_float2bfloat16(const float f, unsigned int &sign, unsigned int &remainder) +{ + unsigned int x; + + x = __internal_float_as_uint(f); + + if ((x & 0x7fffffffU) > 0x7f800000U) { + sign = 0U; + remainder = 0U; + return static_cast(0x7fffU); + } + sign = x >> 31U; + remainder = x << 16U; + return static_cast(x >> 16U); +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __double2bfloat16(const double x) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("{ cvt.rn.bf16.f64 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "d"(x)); + return val; +, + float f = static_cast(x); + const double d = static_cast(f); + unsigned int u = __internal_float_as_uint(f); + + bool x_is_not_nan = ((u << (unsigned)1U) <= (unsigned)0xFF000000U); + + + if ((x > 0.0) && (d > x)) { + u--; + } + if ((x < 0.0) && (d < x)) { + u--; + } + if ((d != x) && x_is_not_nan) { + u |= 1U; + } + + f = __internal_uint_as_float(u); + + return __float2bfloat16(f); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16(const float a) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); +, + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = 
__internal_float2bfloat16(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rn(const float a) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); +, + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rz(const float a) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rz.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); +, + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rd(const float a) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("{ cvt.rm.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); + return val; +, + __nv_bfloat16 val; + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + if ((remainder != 0U) && (sign != 0U)) { + r.x++; + } + val = r; + return val; +) +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_ru(const float a) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("{ cvt.rp.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); + return val; +, + __nv_bfloat16 val; + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + if ((remainder != 0U) && (sign == 0U)) { + r.x++; + } + val = r; + return val; +) +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __float2bfloat162_rn(const float a) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{.reg .b16 low;\n" + " cvt.rn.bf16.f32 low, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "f"(a)); +, + val = __nv_bfloat162(__float2bfloat16_rn(a), __float2bfloat16_rn(a)); +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __floats2bfloat162_rn(const float a, const float b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.bf16x2.f32 %0, %2, %1;}\n" + : "=r"(__BFLOAT162_TO_UI(val)) : "f"(a), "f"(b)); +, + val = __nv_bfloat162(__float2bfloat16_rn(a), __float2bfloat16_rn(b)); +) + return val; +} + +#if (defined __CUDACC__) +__CUDA_BF16_DECL__ float __internal_device_bfloat162float(const unsigned short h) +{ + float f; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("{ cvt.f32.bf16 %0, %1;}\n" : "=f"(f) : "h"(h)); +, + asm("{ mov.b32 %0, {0,%1};}\n" : "=f"(f) : "h"(h)); +) + return f; +} +#endif /* (defined __CUDACC__) */ + +__CUDA_HOSTDEVICE_BF16_DECL__ float __internal_bfloat162float(const unsigned short h) +{ + float f; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + f = __internal_device_bfloat162float(h); +, + unsigned int u = static_cast(h) << 16; + f = __internal_uint_as_float(u); +) + return f; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ float __bfloat162float(const __nv_bfloat16 a) +{ + return __internal_bfloat162float(static_cast<__nv_bfloat16_raw>(a).x); +} +__CUDA_HOSTDEVICE_BF16_DECL__ float 
__low2float(const __nv_bfloat162 a) +{ + return __internal_bfloat162float(static_cast<__nv_bfloat162_raw>(a).x); +} + +__CUDA_HOSTDEVICE_BF16_DECL__ float __high2float(const __nv_bfloat162 a) +{ + return __internal_bfloat162float(static_cast<__nv_bfloat162_raw>(a).y); +} + +#if (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) + +/* CUDA vector-types compatible vector creation function (note returns __nv_bfloat162, not nv_bfloat162) */ +__VECTOR_FUNCTIONS_DECL__ __nv_bfloat162 make_bfloat162(const __nv_bfloat16 x, const __nv_bfloat16 y) +{ + __nv_bfloat162 t; t.x = x; t.y = y; return t; +} +#undef __VECTOR_FUNCTIONS_DECL__ + + +/* Definitions of intrinsics */ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __float22bfloat162_rn(const float2 a) +{ + __nv_bfloat162 val = __floats2bfloat162_rn(a.x, a.y); + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ float2 __bfloat1622float2(const __nv_bfloat162 a) +{ + float hi_float; + float lo_float; + lo_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).x); + hi_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).y); + return make_float2(lo_float, hi_float); +} +__CUDA_BF16_DECL__ int __bfloat162int_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rni.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2int_rn(__bfloat162float(h)); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ int __internal_bfloat162int_rz(const __nv_bfloat16 h) +{ + const float f = __bfloat162float(h); + int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + i = __float2int_rz(f); +, + const int max_val = (int)0x7fffffffU; + const int min_val = (int)0x80000000U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = 0; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ int __bfloat162int_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rzi.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __internal_bfloat162int_rz(h); +) +} + +__CUDA_BF16_DECL__ int __bfloat162int_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rmi.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2int_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ int __bfloat162int_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rpi.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2int_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_int2bfloat16_rn(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + const float ru = __int2float_ru(i); + const float rd = __int2float_rd(i); + float rz = __int2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rn(const int i) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_int2bfloat16_rn(i); +, + const double d = 
static_cast(i); + return __double2bfloat16(d); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rz(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_rz(__int2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rd(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_rd(__int2float_rd(i)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_ru(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_ru(__int2float_ru(i)); +) +} + +__CUDA_BF16_DECL__ short int __bfloat162short_rn(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rni.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rni.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_BF16_DECL__ short int __internal_device_bfloat162short_rz(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rzi.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ short int __bfloat162short_rz(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + val = __internal_device_bfloat162short_rz(h); +, + const float f = __bfloat162float(h); + const short int max_val = (short int)0x7fffU; + const short int min_val = (short int)0x8000U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + val = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + val = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + val = min_val; + } else { + val = static_cast(f); + } +) + return val; +} + +__CUDA_BF16_DECL__ short int __bfloat162short_rd(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rmi.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rmi.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_BF16_DECL__ short int __bfloat162short_ru(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rpi.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rpi.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rn(const short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + const float f = static_cast(i); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rz(const short int i) +{ 
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_rz(__int2float_rz(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rd(const short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_rd(__int2float_rd(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_ru(const short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_ru(__int2float_ru(static_cast(i))); +) +} + +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rni.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2uint_rn(__bfloat162float(h)); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __internal_bfloat162uint_rz(const __nv_bfloat16 h) +{ + const float f = __bfloat162float(h); + unsigned int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + i = __float2uint_rz(f); +, + const unsigned int max_val = 0xffffffffU; + const unsigned int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = 0U; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __bfloat162uint_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rzi.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __internal_bfloat162uint_rz(h); +) +} +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rmi.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2uint_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rpi.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2uint_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_uint2bfloat16_rn(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + const float ru = __uint2float_ru(i); + const float rd = __uint2float_rd(i); + float rz = __uint2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rn(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_uint2bfloat16_rn(i); +, + const double d = static_cast(i); + return __double2bfloat16(d); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rz(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : 
"r"(i)); + return val; +, + return __float2bfloat16_rz(__uint2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rd(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_rd(__uint2float_rd(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_ru(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_ru(__uint2float_ru(i)); +) +} + +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_rn(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rni.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rni.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_BF16_DECL__ unsigned short int __internal_device_bfloat162ushort_rz(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rzi.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned short int __bfloat162ushort_rz(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + val = __internal_device_bfloat162ushort_rz(h); +, + const float f = __bfloat162float(h); + const unsigned short int max_val = 0xffffU; + const unsigned short int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + val = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + val = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + val = min_val; + } else { + val = static_cast(f); + } +) + return val; +} +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_rd(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rmi.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rmi.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_ru(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rpi.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rpi.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rn(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + const float f = static_cast(i); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rz(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : 
"h"(i)); + return val; +, + return __float2bfloat16_rz(__uint2float_rz(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rd(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_rd(__uint2float_rd(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_ru(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_ru(__uint2float_ru(static_cast(i))); +) +} + +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned long long int i; + asm("cvt.rni.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ull_rn(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ unsigned long long int __internal_device_bfloat162ull_rz(const __nv_bfloat16 h) +{ + unsigned long long int i; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); +, + const float f = __bfloat162float(h); + i = __float2ull_rz(f); +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned long long int __bfloat162ull_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_bfloat162ull_rz(h); +, + const float f = __bfloat162float(h); + unsigned long long int i; + const unsigned long long int max_val = 0xffffffffffffffffULL; + const unsigned long long int min_val = 0ULL; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = 0x8000000000000000ULL; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } + return i; +) +} + +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned long long int i; + asm("cvt.rmi.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ull_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned long long int i; + asm("cvt.rpi.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ull_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_ull2bfloat16_rn(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rn.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + const float ru = __ull2float_ru(i); + const float rd = __ull2float_rd(i); + float rz = __ull2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rn(const unsigned long long int i) +{ + +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_ull2bfloat16_rn(i); +, + float f = static_cast(i); + const unsigned long long int uf = static_cast(f); + unsigned int u = __internal_float_as_uint(f); + // round up happened here + // note: no need to handle round up to f == 0x1.p64 specially + if 
(uf > i) { + u--; + } + if (uf != i) { + u |= 1U; + } + f = __internal_uint_as_float(u); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rz(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rz.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rz(__ull2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rd(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rm.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rd(__ull2float_rd(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_ru(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rp.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_ru(__ull2float_ru(i)); +) +} +__CUDA_BF16_DECL__ long long int __bfloat162ll_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + long long int i; + asm("cvt.rni.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ll_rn(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ long long int __internal_device_bfloat162ll_rz(const __nv_bfloat16 h) +{ + long long int i; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); +, + const float f = __bfloat162float(h); + i = __float2ll_rz(f); +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ long long int __bfloat162ll_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_bfloat162ll_rz(h); +, + long long int i; + const float f = __bfloat162float(h); + const long long int max_val = (long long int)0x7fffffffffffffffULL; + const long long int min_val = (long long int)0x8000000000000000ULL; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = min_val; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } + return i; +) +} +__CUDA_BF16_DECL__ long long int __bfloat162ll_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + long long int i; + asm("cvt.rmi.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ll_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ long long int __bfloat162ll_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + long long int i; + asm("cvt.rpi.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ll_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_ll2bfloat16_rn(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rn.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + const float ru = __ll2float_ru(i); + const float rd = __ll2float_rd(i); + float rz = __ll2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rn(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_ll2bfloat16_rn(i); +, + float f = static_cast(i); + const long long int lf = 
static_cast(f); + unsigned int u = __internal_float_as_uint(f); + + if ((f > 0.0f) && (lf > i)) { + u--; + } + if ((f < 0.0f) && (lf < i)) { + u--; + } + if (lf != i) { + u |= 1U; + } + + f = __internal_uint_as_float(u); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rz(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rz.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rz(__ll2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rd(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rm.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rd(__ll2float_rd(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_ru(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rp.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_ru(__ll2float_ru(i)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 htrunc(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rzi.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_rz(truncf(__bfloat162float(h))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hceil(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rpi.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_ru(ceilf(__bfloat162float(h))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hfloor(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rmi.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_rd(floorf(__bfloat162float(h))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hrint(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rni.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_rn(rintf(__bfloat162float(h))); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat162 h2trunc(const __nv_bfloat162 h) +{ + const __nv_bfloat16 low = htrunc(h.x); + const __nv_bfloat16 high = htrunc(h.y); + return __nv_bfloat162(low, high); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2ceil(const __nv_bfloat162 h) +{ + const __nv_bfloat16 low = hceil(h.x); + const __nv_bfloat16 high = hceil(h.y); + return __nv_bfloat162(low, high); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2floor(const __nv_bfloat162 h) +{ + const __nv_bfloat16 low = hfloor(h.x); + const __nv_bfloat16 high = hfloor(h.y); + return __nv_bfloat162(low, high); +} + +__CUDA_BF16_DECL__ __nv_bfloat162 h2rint(const __nv_bfloat162 h) +{ + return __halves2bfloat162(hrint(__low2bfloat16(h)), hrint(__high2bfloat16(h))); +} +__CUDA_BF16_DECL__ __nv_bfloat162 __lows2bfloat162(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm("{.reg .b16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {alow,blow};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)), "r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __highs2bfloat162(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm("{.reg .b16 alow,ahigh,blow,bhigh;\n" + " mov.b32 
{alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)), "r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __low2bfloat16(const __nv_bfloat162 a) +{ + __nv_bfloat16 ret; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, low;}" : "=h"(__BFLOAT16_TO_US(ret)) : "r"(__BFLOAT162_TO_CUI(a))); + return ret; +} +__CUDA_BF16_DECL__ int __hisinf(const __nv_bfloat16 a) +{ + int retval; + if (__BFLOAT16_TO_CUS(a) == 0xFF80U) { + retval = -1; + } else if (__BFLOAT16_TO_CUS(a) == 0x7F80U) { + retval = 1; + } else { + retval = 0; + } + return retval; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __low2bfloat162(const __nv_bfloat162 a) +{ + __nv_bfloat162 val; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __high2bfloat162(const __nv_bfloat162 a) +{ + __nv_bfloat162 val; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,high};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __high2bfloat16(const __nv_bfloat162 a) +{ + __nv_bfloat16 ret; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, high;}" : "=h"(__BFLOAT16_TO_US(ret)) : "r"(__BFLOAT162_TO_CUI(a))); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __halves2bfloat162(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat162 val; + asm("{ mov.b32 %0, {%1,%2};}\n" + : "=r"(__BFLOAT162_TO_UI(val)) : "h"(__BFLOAT16_TO_CUS(a)), "h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __bfloat162bfloat162(const __nv_bfloat16 a) +{ + __nv_bfloat162 val; + asm("{ mov.b32 %0, {%1,%1};}\n" + : "=r"(__BFLOAT162_TO_UI(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __lowhigh2highlow(const __nv_bfloat162 a) +{ + __nv_bfloat162 val; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,low};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ short int __bfloat16_as_short(const __nv_bfloat16 h) +{ + return static_cast(__BFLOAT16_TO_CUS(h)); +} +__CUDA_BF16_DECL__ unsigned short int __bfloat16_as_ushort(const __nv_bfloat16 h) +{ + return __BFLOAT16_TO_CUS(h); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short_as_bfloat16(const short int i) +{ + __nv_bfloat16 h; + __BFLOAT16_TO_US(h) = static_cast(i); + return h; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort_as_bfloat16(const unsigned short int i) +{ + __nv_bfloat16 h; + __BFLOAT16_TO_US(h) = i; + return h; +} + +/****************************************************************************** +* __nv_bfloat16, __nv_bfloat162 warp shuffle * +******************************************************************************/ +#define __SHUFFLE_SYNC_BFLOAT162_MACRO(name) /* do */ {\ + __nv_bfloat162 r; \ + asm volatile ("{" __CUDA_BF16_STRINGIFY(name) " %0,%1,%2,%3,%4;\n}" \ + :"=r"(__BFLOAT162_TO_UI(r)): "r"(__BFLOAT162_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \ + return r; \ +} /* while(0) */ + +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_sync(const unsigned mask, const __nv_bfloat162 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - 
static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.idx.b32) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_up_sync(const unsigned mask, const __nv_bfloat162 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.up.b32) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_down_sync(const unsigned mask, const __nv_bfloat162 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.down.b32) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_xor_sync(const unsigned mask, const __nv_bfloat162 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.bfly.b32) +} + +#undef __SHUFFLE_SYNC_BFLOAT162_MACRO + +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_sync(const unsigned mask, const __nv_bfloat16 var, const int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_up_sync(const unsigned mask, const __nv_bfloat16 var, const unsigned int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_up_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_down_sync(const unsigned mask, const __nv_bfloat16 var, const unsigned int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_down_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_xor_sync(const unsigned mask, const __nv_bfloat16 var, const int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_xor_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} + +/****************************************************************************** +* __nv_bfloat16 and __nv_bfloat162 __ldg,__ldcg,__ldca,__ldcs * +******************************************************************************/ + +#if defined(__cplusplus) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldg(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldg(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcg(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcg(const 
__nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldca(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldca(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcs(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcs(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldlu(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldlu(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcv(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcv(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} + +__CUDA_BF16_DECL__ void __stwb(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stwb(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcg(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcg(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcs(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcs(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stwt(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stwt(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} + +#undef __LDG_PTR +#endif /*defined(__cplusplus) */ +/****************************************************************************** +* __nv_bfloat162 comparison * 
+******************************************************************************/ +#define __COMPARISON_OP_BFLOAT162_MACRO(name) {\ + __nv_bfloat162 val; \ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + asm( "{ " __CUDA_BF16_STRINGIFY(name) ".bf16x2.bf16x2 %0,%1,%2;\n}" \ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +,\ + asm( "{.reg .b32 low_a,low_b,high_a,high_b,high_res,low_res;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 high_res, high_a, high_b;\n"\ + " shr.u32 low_res, low_res, 16;\n"\ + " or.b32 %0, high_res, low_res;}\n"\ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +)\ + return val; \ +} + +__CUDA_BF16_DECL__ __nv_bfloat162 __heq2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.eq) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hne2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.ne) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hle2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.le) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hge2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.ge) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hlt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.lt) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hgt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.gt) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hequ2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.equ) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hneu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.neu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hleu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.leu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hgeu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.geu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hltu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.ltu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hgtu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.gtu) +} +#undef __COMPARISON_OP_BFLOAT162_MACRO +/****************************************************************************** +* __nv_bfloat162 comparison with mask output * +******************************************************************************/ +#define __COMPARISON_OP_BFLOAT162_MACRO_MASK(name) {\ + unsigned val; \ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + asm( "{ " __CUDA_BF16_STRINGIFY(name) ".u32.bf16x2 %0,%1,%2;\n}" \ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +,\ + asm( "{.reg .b32 low_a,low_b,high_a,high_b;\n"\ + " .reg .u32 high_res,low_res;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".u32.f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".u32.f32 high_res, high_a, high_b;\n"\ + " shr.u32 low_res, low_res, 16;\n"\ + " shl.b32 high_res, high_res, 
16;\n"\ + " or.b32 %0, high_res, low_res;}\n"\ + :"=r"(val) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +)\ + return val; \ +} + +__CUDA_BF16_DECL__ unsigned __heq2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.eq) +} +__CUDA_BF16_DECL__ unsigned __hne2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.ne) +} +__CUDA_BF16_DECL__ unsigned __hle2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.le) +} +__CUDA_BF16_DECL__ unsigned __hge2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.ge) +} +__CUDA_BF16_DECL__ unsigned __hlt2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.lt) +} +__CUDA_BF16_DECL__ unsigned __hgt2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.gt) +} +__CUDA_BF16_DECL__ unsigned __hequ2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.equ) +} +__CUDA_BF16_DECL__ unsigned __hneu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.neu) +} +__CUDA_BF16_DECL__ unsigned __hleu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.leu) +} +__CUDA_BF16_DECL__ unsigned __hgeu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.geu) +} +__CUDA_BF16_DECL__ unsigned __hltu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.ltu) +} +__CUDA_BF16_DECL__ unsigned __hgtu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.gtu) +} +#undef __COMPARISON_OP_BFLOAT162_MACRO_MASK + +#define __BOOL_COMPARISON_OP_BFLOAT162_MACRO(name) {\ + unsigned int val; \ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + bool retval; \ + asm( "{ " __CUDA_BF16_STRINGIFY(name) ".bf16x2.bf16x2 %0,%1,%2;\n}" \ + :"=r"(val) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ + if (val == 0x3F803F80U) {\ + retval = true; \ + } else { \ + retval = false; \ + }\ + return retval;\ +,\ + asm( "{.reg .b32 low_a,low_b,high_a,high_b,high_res,low_res;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 high_res, high_a, high_b;\n"\ + " and.b32 %0, high_res, low_res;}\n"\ + :"=r"(val) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ + return (val != 0U) ? 
true : false; \ +)\ +} + +__CUDA_BF16_DECL__ bool __hbeq2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.eq) +} +__CUDA_BF16_DECL__ bool __hbne2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.ne) +} +__CUDA_BF16_DECL__ bool __hble2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.le) +} +__CUDA_BF16_DECL__ bool __hbge2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.ge) +} +__CUDA_BF16_DECL__ bool __hblt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.lt) +} +__CUDA_BF16_DECL__ bool __hbgt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.gt) +} +__CUDA_BF16_DECL__ bool __hbequ2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.equ) +} +__CUDA_BF16_DECL__ bool __hbneu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.neu) +} +__CUDA_BF16_DECL__ bool __hbleu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.leu) +} +__CUDA_BF16_DECL__ bool __hbgeu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.geu) +} +__CUDA_BF16_DECL__ bool __hbltu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.ltu) +} +__CUDA_BF16_DECL__ bool __hbgtu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.gtu) +} +#undef __BOOL_COMPARISON_OP_BFLOAT162_MACRO +/****************************************************************************** +* __nv_bfloat16 comparison * +******************************************************************************/ +#define __COMPARISON_OP_BFLOAT16_MACRO(name) {\ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + unsigned short val; \ + asm( "{ .reg .pred __$temp3;\n" \ + " setp." __CUDA_BF16_STRINGIFY(name) ".bf16 __$temp3, %1, %2;\n" \ + " selp.u16 %0, 1, 0, __$temp3;}" \ + : "=h"(val) : "h"(__BFLOAT16_TO_CUS(a)), "h"(__BFLOAT16_TO_CUS(b))); \ + return (val != 0U) ? true : false; \ +,\ + unsigned int val; \ + asm( "{.reg .b32 a,b;\n"\ + " mov.b32 a, {0, %1};\n"\ + " mov.b32 b, {0, %2};\n"\ + " set." __CUDA_BF16_STRINGIFY(name) ".f32.f32 %0, a, b;}\n"\ + :"=r"(val) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); \ + return (val != 0U) ? 
true : false; \ +)\ +} +__CUDA_BF16_DECL__ bool __heq(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(eq) +} +__CUDA_BF16_DECL__ bool __hne(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(ne) +} +__CUDA_BF16_DECL__ bool __hle(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(le) +} +__CUDA_BF16_DECL__ bool __hge(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(ge) +} +__CUDA_BF16_DECL__ bool __hlt(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(lt) +} +__CUDA_BF16_DECL__ bool __hgt(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(gt) +} +__CUDA_BF16_DECL__ bool __hequ(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(equ) +} +__CUDA_BF16_DECL__ bool __hneu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(neu) +} +__CUDA_BF16_DECL__ bool __hleu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(leu) +} +__CUDA_BF16_DECL__ bool __hgeu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(geu) +} +__CUDA_BF16_DECL__ bool __hltu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(ltu) +} +__CUDA_BF16_DECL__ bool __hgtu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(gtu) +} +#undef __COMPARISON_OP_BFLOAT16_MACRO +/****************************************************************************** +* __nv_bfloat162 arithmetic * +******************************************************************************/ +#define __BINARY_OP_BFLOAT162_MACRO(name) /* do */ {\ + __nv_bfloat162 val; \ + asm( "{.reg .b32 low_a,low_b,high_a,high_b,high_res,low_res;\n"\ + " .reg .b16 low,high;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32 high_res, high_a, high_b;\n"\ + " cvt.rn.bf16.f32 low, low_res;\n"\ + " cvt.rn.bf16.f32 high, high_res;\n"\ + " mov.b32 %0, {low,high};}\n"\ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ + return val; \ +} /* while(0) */ + +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x3f803f80U;\n" + " fma.rn.bf16x2 %0,%1,c,%2;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0xbf80bf80U;\n" + " fma.rn.bf16x2 %0,%2,c,%1;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.bf16x2 %0,%1,%2; 
}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x80008000U;\n" + " fma.rn.bf16x2 %0,%1,%2,c;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.rn.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x3f803f80U;\n" + " fma.rn.bf16x2 %0,%1,c,%2;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.rn.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0xbf80bf80U;\n" + " fma.rn.bf16x2 %0,%2,c,%1;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.rn.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x80008000U;\n" + " fma.rn.bf16x2 %0,%1,%2,c;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{.reg .b32 f, one, zero;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " fma.rn.bf16x2 f,%1,one,%2;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{.reg .b32 f, one, zero, mone;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " mov.b32 mone, 0xbf80bf80U;\n" + " fma.rn.bf16x2 f,%2,mone,%1;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{.reg .b32 f, one, zero, mzero;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " mov.b32 mzero, 0x80008000U;\n" + " fma.rn.bf16x2 f,%1,%2,mzero;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + __nv_bfloat162 val; + asm( "{fma.rn.bf16x2 %0,%1,%2,%3;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b)),"r"(__BFLOAT162_TO_CUI(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + __nv_bfloat162 val; + 
asm( "{ .reg .b32 f, one, zero;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " fma.rn.bf16x2 f, %1, %2, %3;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b)),"r"(__BFLOAT162_TO_CUI(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __h2div(const __nv_bfloat162 a, const __nv_bfloat162 b) { + __nv_bfloat16 ha, hb; + + ha = __low2bfloat16(a); + hb = __low2bfloat16(b); + + const __nv_bfloat16 v1 = __hdiv(ha, hb); + + ha = __high2bfloat16(a); + hb = __high2bfloat16(b); + + const __nv_bfloat16 v2 = __hdiv(ha, hb); + + return __halves2bfloat162(v1, v2); +} +/****************************************************************************** +* __nv_bfloat16 arithmetic * +******************************************************************************/ +#define __BINARY_OP_BFLOAT16_MACRO(name) /* do */ {\ + __nv_bfloat16 val; \ + asm( "{.reg .b32 a,b,res;\n"\ + " mov.b32 a, {0,%1};\n"\ + " mov.b32 b, {0,%2};\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32 res, a, b;\n"\ + " cvt.rn.bf16.f32 %0, res;}\n"\ + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); \ + return val; \ +} /* while(0) */ + +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x3f80U;\n" + " fma.rn.bf16 %0,%1,c,%2;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0xbf80U;\n" + " fma.rn.bf16 %0,%2,c,%1;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x8000U;\n" + " fma.rn.bf16 %0,%1,%2,c;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd_rn(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.rn.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x3f80U;\n" + " fma.rn.bf16 %0,%1,c,%2;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub_rn(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.rn.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0xbf80U;\n" + " fma.rn.bf16 %0,%2,c,%1;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : 
"h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul_rn(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.rn.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x8000U;\n" + " fma.rn.bf16 %0,%1,%2,c;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd_sat(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " fma.rn.bf16 f, %1, one, %2;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub_sat(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero, mone;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " mov.b16 mone, 0xbf80U;\n" + " fma.rn.bf16 f, %2, mone, %1;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul_sat(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero, mzero;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " mov.b16 mzero, 0x8000U;\n" + " fma.rn.bf16 f, %1, %2, mzero;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c) +{ + __nv_bfloat16 val; + asm( "{fma.rn.bf16 %0,%1,%2,%3;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b)),"h"(__BFLOAT16_TO_CUS(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma_sat(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " fma.rn.bf16 f, %1, %2, %3;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b)),"h"(__BFLOAT16_TO_CUS(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hdiv(const __nv_bfloat16 a, const __nv_bfloat16 b) { + __BINARY_OP_BFLOAT16_MACRO(div.rn) +} + +/****************************************************************************** +* __nv_bfloat162 functions * +******************************************************************************/ +#define __APPROX_FCAST(fun) /* do */ {\ + __nv_bfloat16 val;\ + asm("{.reg.b32 f; \n"\ + " .reg.b16 r; \n"\ + " mov.b16 r,%1; \n"\ + " mov.b32 f,{0,r}; \n"\ + " " __CUDA_BF16_STRINGIFY(fun) ".approx.f32 f,f; \n"\ + " cvt.rn.bf16.f32 r,f; \n"\ + " mov.b16 %0,r; \n"\ + "}": "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)));\ + return val;\ +} /* while(0) */ +#define __APPROX_FCAST2(fun) /* do */ {\ + __nv_bfloat162 val;\ + asm("{.reg.b16 hl, hu; \n"\ + " .reg.b32 fl, fu; \n"\ + " mov.b32 {hl, hu}, %1; \n"\ + " mov.b32 fl, {0,hl}; \n"\ + " mov.b32 fu, {0,hu}; \n"\ + " " __CUDA_BF16_STRINGIFY(fun) ".approx.f32 fl, fl; \n"\ + " " 
__CUDA_BF16_STRINGIFY(fun) ".approx.f32 fu, fu; \n"\ + " cvt.rn.bf16.f32 hl, fl; \n"\ + " cvt.rn.bf16.f32 hu, fu; \n"\ + " mov.b32 %0, {hl, hu}; \n"\ + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); \ + return val;\ +} /* while(0) */ +__CUDA_BF16_DECL__ __nv_bfloat16 __hsin_internal(const __nv_bfloat16 a) { + float f = __bfloat162float(a); + f = sinf(f); + return __float2bfloat16_rn(f); +} +__CUDA_BF16_DECL__ __nv_bfloat16 hsin(const __nv_bfloat16 a) { + return __hsin_internal(a); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2sin(const __nv_bfloat162 a) { + const __nv_bfloat16 l = __low2bfloat16(a); + const __nv_bfloat16 h = __high2bfloat16(a); + return __halves2bfloat162(__hsin_internal(l), __hsin_internal(h)); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hcos_internal(const __nv_bfloat16 a) { + float f = __bfloat162float(a); + f = cosf(f); + return __float2bfloat16_rn(f); +} +__CUDA_BF16_DECL__ __nv_bfloat16 hcos(const __nv_bfloat16 a) { + return __hcos_internal(a); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2cos(const __nv_bfloat162 a) { + const __nv_bfloat16 l = __low2bfloat16(a); + const __nv_bfloat16 h = __high2bfloat16(a); + return __halves2bfloat162(__hcos_internal(l), __hcos_internal(h)); +} + +#define __BF16_SPEC_CASE2(i,r, spc, ulp) \ + "{.reg.b32 spc, ulp, p;\n"\ + " mov.b32 spc," __CUDA_BF16_STRINGIFY(spc) ";\n"\ + " mov.b32 ulp," __CUDA_BF16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16x2.f16x2 p," __CUDA_BF16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.bf16x2 " __CUDA_BF16_STRINGIFY(r) ",p,ulp," __CUDA_BF16_STRINGIFY(r) ";\n}\n" +#define __BF16_SPEC_CASE(i,r, spc, ulp) \ + "{.reg.b16 spc, ulp, p;\n"\ + " mov.b16 spc," __CUDA_BF16_STRINGIFY(spc) ";\n"\ + " mov.b16 ulp," __CUDA_BF16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16.f16 p," __CUDA_BF16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.bf16 " __CUDA_BF16_STRINGIFY(r) ",p,ulp," __CUDA_BF16_STRINGIFY(r) ";\n}\n" + +__CUDA_BF16_DECL__ __nv_bfloat16 hexp(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 h,r; \n" + " mov.b16 h,%1; \n" + " mov.b32 f,{0,h}; \n" + " mov.b32 C, 0x3FB8AA3CU; \n" + " mul.f32 f,f,C; \n" + " ex2.approx.f32 f,f; \n" + " cvt.rn.bf16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu, C; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " mov.b32 C, 0x3FB8AA3CU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 hexp2(const __nv_bfloat16 a) { + __APPROX_FCAST(ex2) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp2(const __nv_bfloat162 a) { + __APPROX_FCAST2(ex2) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hexp10(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " mov.b32 f, {0,h}; \n" + " mov.b32 C, 0x40549A78U; \n" + " mul.f32 f,f,C; \n" + " ex2.approx.f32 f, f; \n" + " cvt.rn.bf16.f32 r, f; \n" + __BF16_SPEC_CASE(%1, r, 0xBC95U,0xBF00U) + " mov.b16 %0, r; \n" + "}":"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 
h2exp10(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu, C; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " mov.b32 C, 0x40549A78U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __BF16_SPEC_CASE2(%1, r, 0xBC95BC95U,0xBF00BF00U) + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 hlog2(const __nv_bfloat16 a) { + __APPROX_FCAST(lg2) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2log2(const __nv_bfloat162 a) { + __APPROX_FCAST2(lg2) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hlog(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 r,h; \n" + " mov.b16 h,%1; \n" + " mov.b32 f,{0,h}; \n" + " lg2.approx.f32 f,f; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.bf16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2log(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 hlog10(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " mov.b32 f, {0,h}; \n" + " lg2.approx.f32 f, f; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.bf16.f32 r, f; \n" + " mov.b16 %0, r; \n" + "}":"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2log10(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +#undef __BF16_SPEC_CASE2 +#undef __BF16_SPEC_CASE +__CUDA_BF16_DECL__ __nv_bfloat162 h2rcp(const __nv_bfloat162 a) { + __APPROX_FCAST2(rcp) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hrcp(const __nv_bfloat16 a) { + __APPROX_FCAST(rcp) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2rsqrt(const __nv_bfloat162 a) { + __APPROX_FCAST2(rsqrt) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hrsqrt(const __nv_bfloat16 a) { + __APPROX_FCAST(rsqrt) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2sqrt(const __nv_bfloat162 a) { + __APPROX_FCAST2(sqrt) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hsqrt(const __nv_bfloat16 a) { + __APPROX_FCAST(sqrt) +} +#undef __APPROX_FCAST +#undef __APPROX_FCAST2 +__CUDA_BF16_DECL__ __nv_bfloat162 __hisnan2(const __nv_bfloat162 a) +{ 
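+    // per-lane NaN test: on SM_90+ compare a with itself using set.nan.bf16x2
+    // (only NaN is unordered with itself); on older targets widen each bf16
+    // lane into the upper 16 bits of an f32 and reuse the f32 set.nan path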
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat162 r; + asm("{set.nan.bf16x2.bf16x2 %0,%1,%1;\n}" + :"=r"(__BFLOAT162_TO_UI(r)) : "r"(__BFLOAT162_TO_CUI(a))); + return r; +, + const __nv_bfloat162 b = a; + __BINARY_OP_BFLOAT162_MACRO(set.nan.f32) +) +} +__CUDA_BF16_DECL__ bool __hisnan(const __nv_bfloat16 a) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("{set.nan.bf16.bf16 %0,%1,%1;\n}" + :"=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(a))); + return __BFLOAT16_TO_CUS(r) != 0U; +, + unsigned int r; + asm( "{.reg .b32 a;\n" + " mov.b32 a, {0,%1};\n" + " set.nan.f32.f32 %0, a, a;}\n" + :"=r"(r) : "h"(__BFLOAT16_TO_CUS(a))); + return r != 0U; +) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hneg2(const __nv_bfloat162 a) +{ + __nv_bfloat162 r; + asm("{neg.bf16x2 %0,%1;\n}" + :"=r"(__BFLOAT162_TO_UI(r)) : "r"(__BFLOAT162_TO_CUI(a))); + return r; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hneg(const __nv_bfloat16 a) +{ + __nv_bfloat16 r; + asm("{neg.bf16 %0,%1;\n}" + :"=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(a))); + return r; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __habs2(const __nv_bfloat162 a) +{ + __nv_bfloat162 r; + asm("{abs.bf16x2 %0,%1;\n}" + :"=r"(__BFLOAT162_TO_UI(r)) : "r"(__BFLOAT162_TO_CUI(a))); + return r; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __habs(const __nv_bfloat16 a) +{ + __nv_bfloat16 r; + asm("{abs.bf16 %0,%1;\n}" + :"=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(a))); + return r; +} +/****************************************************************************** +* __nv_bfloat16 arithmetic * +******************************************************************************/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmax(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ max.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmin(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ min.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmax_nan(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ max.NaN.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmin_nan(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ min.NaN.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma_relu(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c) +{ + __nv_bfloat16 val; + asm( "{ fma.rn.relu.bf16 %0,%1,%2,%3;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b)),"h"(__BFLOAT16_TO_CUS(c))); + return val; +} +/****************************************************************************** +* __nv_bfloat162 arithmetic * +******************************************************************************/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmax2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ max.bf16x2 %0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmin2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ min.bf16x2 
%0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmax2_nan(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ max.NaN.bf16x2 %0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmin2_nan(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ min.NaN.bf16x2 %0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2_relu(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + __nv_bfloat162 val; + asm( "{ fma.rn.relu.bf16x2 %0,%1,%2,%3;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b)),"r"(__BFLOAT162_TO_CUI(c))); + return val; +} + +__CUDA_BF16_DECL__ __nv_bfloat162 __hcmadd(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + // fast version of complex multiply-accumulate + // (a.re, a.im) * (b.re, b.im) + (c.re, c.im) + // acc.re = (c.re + a.re*b.re) - a.im*b.im + // acc.im = (c.im + a.re*b.im) + a.im*b.re + __nv_bfloat16 real_tmp = __hfma(a.x, b.x, c.x); + __nv_bfloat16 img_tmp = __hfma(a.x, b.y, c.y); + real_tmp = __hfma(__hneg(a.y), b.y, real_tmp); + img_tmp = __hfma(a.y, b.x, img_tmp); + return make_bfloat162(real_tmp, img_tmp); +} + + +/* Define __PTR for atomicAdd prototypes below, undef after done */ +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __PTR "l" +#else +#define __PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ + +__CUDA_BF16_DECL__ __nv_bfloat162 atomicAdd(__nv_bfloat162 *const address, const __nv_bfloat162 val) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat162 r; + asm volatile ("{ atom.add.noftz.bf16x2 %0,[%1],%2; }\n" + : "=r"(__BFLOAT162_TO_UI(r)) : __PTR(address), "r"(__BFLOAT162_TO_CUI(val)) + : "memory"); + return r; +, + unsigned int* address_as_uint = (unsigned int*)address; + unsigned int old = *address_as_uint; + unsigned int assumed; + do { + assumed = old; + __nv_bfloat162 new_val = __hadd2(val, *(__nv_bfloat162*)&assumed); + old = atomicCAS(address_as_uint, assumed, *(unsigned int*)&new_val); + } while (assumed != old); + return *(__nv_bfloat162*)&old; +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 atomicAdd(__nv_bfloat16 *const address, const __nv_bfloat16 val) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm volatile ("{ atom.add.noftz.bf16 %0,[%1],%2; }\n" + : "=h"(__BFLOAT16_TO_US(r)) + : __PTR(address), "h"(__BFLOAT16_TO_CUS(val)) + : "memory"); + return r; +, + unsigned short int* address_as_us = (unsigned short int*)address; + unsigned short int old = *address_as_us; + unsigned short int assumed; + do { + assumed = old; + old = atomicCAS(address_as_us, assumed, + __bfloat16_as_ushort(__hadd(val, __ushort_as_bfloat16(assumed)))); + } while (assumed != old); + return __ushort_as_bfloat16(old); +) +} + +#undef __PTR +#undef __CUDA_BF16_DECL__ +#endif /* (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) */ +#endif /* defined(__cplusplus) */ + +#undef __BINARY_OP_BFLOAT162_MACRO +#undef __BINARY_OP_BFLOAT16_MACRO + +#undef __CUDA_HOSTDEVICE_BF16_DECL__ +#undef __CUDA_BF16_DECL__ + +/* Define first-class types 
"nv_bfloat16" and "nv_bfloat162", unless user specifies otherwise via "#define CUDA_NO_BFLOAT16" */ +/* C cannot ever have these types defined here, because __nv_bfloat16 and __nv_bfloat162 are C++ classes */ +#if defined(__cplusplus) && !defined(CUDA_NO_BFLOAT16) +typedef __nv_bfloat16 nv_bfloat16; +typedef __nv_bfloat162 nv_bfloat162; + +#endif /* defined(__cplusplus) && !defined(CUDA_NO_BFLOAT16) */ + +#if defined(__CPP_VERSION_AT_LEAST_11_BF16) +#undef __CPP_VERSION_AT_LEAST_11_BF16 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */ + +#endif /* end of include guard: __CUDA_BF16_HPP__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2730d51bf8c4b0931b8bbfc3c9a0ff0c72694725 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h @@ -0,0 +1,735 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. 
Government End + * Users Notice. + */ + +#if !defined(__CUDA_DEVICE_RUNTIME_API_H__) +#define __CUDA_DEVICE_RUNTIME_API_H__ + +#if defined(__CUDACC__) && !defined(__CUDACC_RTC__) +#include +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(CUDA_FORCE_CDP1_IF_SUPPORTED) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_NVHPC_CUDA) && !(defined(_WIN32) && !defined(_WIN64)) +#define __CUDA_INTERNAL_USE_CDP2 +#endif + +#if !defined(__CUDACC_RTC__) + +#if !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__) + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudaFuncAttributes; + + +#ifndef __CUDA_INTERNAL_USE_CDP2 +inline __device__ cudaError_t CUDARTAPI cudaMalloc(void **p, size_t s) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *p, const void *c) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaGetDevice(int *device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags) +{ + return cudaErrorUnknown; +} +#else // __CUDA_INTERNAL_USE_CDP2 +inline __device__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **p, size_t s) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *p, const void *c) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags) +{ + return cudaErrorUnknown; +} +#endif // __CUDA_INTERNAL_USE_CDP2 + + +#if defined(__cplusplus) +} +#endif + +#endif /* !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__) */ + +#endif /* !defined(__CUDACC_RTC__) */ + +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +# define __DEPRECATED__(msg) +#elif defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) 
+#endif + +#if defined(__CUDA_ARCH__) && !defined(__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING) +# define __CDPRT_DEPRECATED(func_name) __DEPRECATED__("Use of "#func_name" from device code is deprecated. Moreover, such use will cause this module to fail to load on sm_90+ devices. If calls to "#func_name" from device code cannot be removed for older devices at this time, you may guard them with __CUDA_ARCH__ macros to remove them only for sm_90+ devices, making sure to generate code for compute_90 for the macros to take effect. Note that this mitigation will no longer work when support for "#func_name" from device code is eventually dropped for all devices. Disable this warning with -D__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING.") +#else +# define __CDPRT_DEPRECATED(func_name) +#endif + +#if defined(__cplusplus) && defined(__CUDACC__) /* Visible to nvcc front-end only */ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350) // Visible to SM>=3.5 and "__host__ __device__" only + +#include "driver_types.h" +#include "crt/host_defines.h" + +#define cudaStreamGraphTailLaunch (cudaStream_t)0x0100000000000000 +#define cudaStreamGraphFireAndForget (cudaStream_t)0x0200000000000000 + +#ifdef __CUDA_INTERNAL_USE_CDP2 +#define cudaStreamTailLaunch ((cudaStream_t)0x3) /**< Per-grid stream with a fire-and-forget synchronization behavior. Only applicable when used with CUDA Dynamic Parallelism. */ +#define cudaStreamFireAndForget ((cudaStream_t)0x4) /**< Per-grid stream with a tail launch semantics. Only applicable when used with CUDA Dynamic Parallelism. */ +#endif + +extern "C" +{ + +// Symbols beginning with __cudaCDP* should not be used outside +// this header file. Instead, compile with -DCUDA_FORCE_CDP1_IF_SUPPORTED if +// CDP1 support is required. + +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaDeviceSynchronizeDeprecationAvoidance(void); + +#ifndef __CUDA_INTERNAL_USE_CDP2 +//// CDP1 endpoints +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig); +#if (__CUDA_ARCH__ < 900) && (defined(CUDA_FORCE_CDP1_IF_SUPPORTED) || (defined(_WIN32) && !defined(_WIN64))) +// cudaDeviceSynchronize is removed on sm_90+ +extern __device__ __cudart_builtin__ __CDPRT_DEPRECATED(cudaDeviceSynchronize) cudaError_t CUDARTAPI cudaDeviceSynchronize(void); +#endif +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void); +extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error); +extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream); +extern __device__ __cudart_builtin__ 
cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI 
cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags); +#endif // __CUDA_INTERNAL_USE_CDP2 + +//// CDP2 endpoints +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetLimit(size_t *pValue, enum cudaLimit limit); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetLastError(void); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2PeekAtLastError(void); +extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorString(cudaError_t error); +extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorName(cudaError_t error); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDeviceCount(int *count); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamDestroy(cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventCreateWithFlags(cudaEvent_t *event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord_ptsz(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventDestroy(cudaEvent_t event); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *attr, const void *func); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Free(void *devPtr); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **devPtr, size_t size); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ 
cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2RuntimeGetVersion(int *runtimeVersion); +extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBuffer(size_t alignment, size_t size); +extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2(void *parameterBuffer, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags); + + +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream); +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) +static inline __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch_ptsz(cudaGraphExec_t graphExec, cudaStream_t stream) +{ + if (stream == 0) { + stream = cudaStreamPerThread; 
+ } + return cudaGraphLaunch(graphExec, stream); +} +#endif + +/** + * \ingroup CUDART_GRAPH + * \brief Get the currently running device graph id. + * + * Get the currently running device graph id. + * \return Returns the current device graph id, 0 if the call is outside of a device graph. + * \sa cudaLaunchDevice + */ +static inline __device__ __cudart_builtin__ cudaGraphExec_t CUDARTAPI cudaGetCurrentGraphExec(void) +{ + unsigned long long current_graph_exec; + asm ("mov.u64 %0, %%current_graph_exec;" : "=l"(current_graph_exec)); + return (cudaGraphExec_t)current_graph_exec; +} + +/** + * \ingroup CUDART_EXECUTION + * \brief Programmatic dependency trigger + * + * This device function ensures the programmatic launch completion edges / + * events are fulfilled. See + * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization + * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more + * information. The event / edge kick off only happens when every CTAs + * in the grid has either exited or called this function at least once, + * otherwise the kick off happens automatically after all warps finishes + * execution but before the grid completes. The kick off only enables + * scheduling of the secondary kernel. It provides no memory visibility + * guarantee itself. The user could enforce memory visibility by inserting a + * memory fence of the correct scope. + */ +static inline __device__ __cudart_builtin__ void CUDARTAPI cudaTriggerProgrammaticLaunchCompletion(void) +{ + asm volatile("griddepcontrol.launch_dependents;":::); +} + +/** + * \ingroup CUDART_EXECUTION + * \brief Programmatic grid dependency synchronization + * + * This device function will block the thread until all direct grid + * dependencies have completed. This API is intended to use in conjuncture with + * programmatic / launch event / dependency. See + * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization + * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more + * information. 
+ */ +static inline __device__ __cudart_builtin__ void CUDARTAPI cudaGridDependencySynchronize(void) +{ + asm volatile("griddepcontrol.wait;":::"memory"); +} + + +//// CG API +extern __device__ __cudart_builtin__ unsigned long long CUDARTAPI cudaCGGetIntrinsicHandle(enum cudaCGScope scope); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronize(unsigned long long handle, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronizeGrid(unsigned long long handle, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetSize(unsigned int *numThreads, unsigned int *numGrids, unsigned long long handle); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetRank(unsigned int *threadRank, unsigned int *gridRank, unsigned long long handle); + + +//// CDP API + +#ifdef __CUDA_ARCH__ + +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device) +{ + return __cudaCDP2DeviceGetAttribute(value, attr, device); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit) +{ + return __cudaCDP2DeviceGetLimit(pValue, limit); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig) +{ + return __cudaCDP2DeviceGetCacheConfig(pCacheConfig); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig) +{ + return __cudaCDP2DeviceGetSharedMemConfig(pConfig); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void) +{ + return __cudaCDP2GetLastError(); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void) +{ + return __cudaCDP2PeekAtLastError(); +} + +static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error) +{ + return __cudaCDP2GetErrorString(error); +} + +static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error) +{ + return __cudaCDP2GetErrorName(error); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count) +{ + return __cudaCDP2GetDeviceCount(count); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device) +{ + return __cudaCDP2GetDevice(device); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags) +{ + return __cudaCDP2StreamCreateWithFlags(pStream, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream) +{ + return __cudaCDP2StreamDestroy(stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags) +{ + return __cudaCDP2StreamWaitEvent(stream, event, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags) +{ + return __cudaCDP2StreamWaitEvent_ptsz(stream, event, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags) 
+{ + return __cudaCDP2EventCreateWithFlags(event, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream) +{ + return __cudaCDP2EventRecord(event, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream) +{ + return __cudaCDP2EventRecord_ptsz(event, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags) +{ + return __cudaCDP2EventRecordWithFlags(event, stream, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags) +{ + return __cudaCDP2EventRecordWithFlags_ptsz(event, stream, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event) +{ + return __cudaCDP2EventDestroy(event); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func) +{ + return __cudaCDP2FuncGetAttributes(attr, func); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr) +{ + return __cudaCDP2Free(devPtr); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size) +{ + return __cudaCDP2Malloc(devPtr, size); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2MemcpyAsync(dst, src, count, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2MemcpyAsync_ptsz(dst, src, count, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2Memcpy2DAsync(dst, dpitch, src, spitch, width, height, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2Memcpy2DAsync_ptsz(dst, dpitch, src, spitch, width, height, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream) +{ + return __cudaCDP2Memcpy3DAsync(p, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream) +{ + return __cudaCDP2Memcpy3DAsync_ptsz(p, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream) +{ + return __cudaCDP2MemsetAsync(devPtr, value, count, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream) +{ + return __cudaCDP2MemsetAsync_ptsz(devPtr, value, count, stream); +} 
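+/*
+ * Note on the wrappers in this block: when __CUDA_INTERNAL_USE_CDP2 is in
+ * effect, the familiar device-side runtime names (cudaMalloc, cudaMemsetAsync,
+ * cudaStreamCreateWithFlags, ...) are thin static inline forwards to the
+ * __cudaCDP2* endpoints declared earlier, so existing CUDA Dynamic Parallelism
+ * code keeps compiling unchanged. A minimal usage sketch from device code is
+ * given below; the kernel and buffer names are illustrative only, error
+ * handling and cleanup are omitted for brevity, and it assumes compilation
+ * with relocatable device code (-rdc=true) and linking against cudadevrt,
+ * as required for device-side runtime calls:
+ *
+ *   __global__ void parentKernel(size_t n)
+ *   {
+ *       void *buf = NULL;
+ *       cudaStream_t s;
+ *       // Device-created streams must use the cudaStreamNonBlocking flag.
+ *       if (cudaMalloc(&buf, n) == cudaSuccess &&
+ *           cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking) == cudaSuccess)
+ *       {
+ *           cudaMemsetAsync(buf, 0, n, s);   // forwarded to __cudaCDP2MemsetAsync
+ *           cudaStreamDestroy(s);            // resources released after pending work completes
+ *       }
+ *   }
+ */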
+ +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream) +{ + return __cudaCDP2Memset2DAsync(devPtr, pitch, value, width, height, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream) +{ + return __cudaCDP2Memset2DAsync_ptsz(devPtr, pitch, value, width, height, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream) +{ + return __cudaCDP2Memset3DAsync(pitchedDevPtr, value, extent, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream) +{ + return __cudaCDP2Memset3DAsync_ptsz(pitchedDevPtr, value, extent, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion) +{ + return __cudaCDP2RuntimeGetVersion(runtimeVersion); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize) +{ + return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSmemSize); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags) +{ + return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSmemSize, flags); +} +#endif // __CUDA_INTERNAL_USE_CDP2 + +#endif // __CUDA_ARCH__ + + +/** + * \ingroup CUDART_EXECUTION + * \brief Obtains a parameter buffer + * + * Obtains a parameter buffer which can be filled with parameters for a kernel launch. + * Parameters passed to ::cudaLaunchDevice must be allocated via this function. + * + * This is a low level API and can only be accessed from Parallel Thread Execution (PTX). + * CUDA user code should use <<< >>> to launch kernels. + * + * \param alignment - Specifies alignment requirement of the parameter buffer + * \param size - Specifies size requirement in bytes + * + * \return + * Returns pointer to the allocated parameterBuffer + * \notefnerr + * + * \sa cudaLaunchDevice + */ +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size) +{ + return __cudaCDP2GetParameterBuffer(alignment, size); +} +#else +extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size); +#endif + + +/** + * \ingroup CUDART_EXECUTION + * \brief Launches a specified kernel + * + * Launches a specified kernel with the specified parameter buffer. A parameter buffer can be obtained + * by calling ::cudaGetParameterBuffer(). + * + * This is a low level API and can only be accessed from Parallel Thread Execution (PTX). + * CUDA user code should use <<< >>> to launch the kernels. + * + * \param func - Pointer to the kernel to be launched + * \param parameterBuffer - Holds the parameters to the launched kernel. parameterBuffer can be NULL. 
(Optional) + * \param gridDimension - Specifies grid dimensions + * \param blockDimension - Specifies block dimensions + * \param sharedMemSize - Specifies size of shared memory + * \param stream - Specifies the stream to be used + * + * \return + * ::cudaSuccess, ::cudaErrorInvalidDevice, ::cudaErrorLaunchMaxDepthExceeded, ::cudaErrorInvalidConfiguration, + * ::cudaErrorStartupFailure, ::cudaErrorLaunchPendingCountExceeded, ::cudaErrorLaunchOutOfResources + * \notefnerr + * \n Please refer to Execution Configuration and Parameter Buffer Layout from the CUDA Programming + * Guide for the detailed descriptions of launch configuration and parameter layout respectively. + * + * \sa cudaGetParameterBuffer + */ +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize) +{ + return __cudaCDP2GetParameterBufferV2(func, gridDimension, blockDimension, sharedMemSize); +} +#else +extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize); +#endif + + +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream) +{ + return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream) +{ + return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream); +} +#else +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream); +#endif + + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__) + // When compiling for the device and per thread default stream is enabled, add + // a static inline redirect to the per thread stream entry points. 
+ + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI + cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream) + { +#ifdef __CUDA_INTERNAL_USE_CDP2 + return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); +#else + return cudaLaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); +#endif + } + + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI + cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream) + { +#ifdef __CUDA_INTERNAL_USE_CDP2 + return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream); +#else + return cudaLaunchDeviceV2_ptsz(parameterBuffer, stream); +#endif + } +#else // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__) +#ifdef __CUDA_INTERNAL_USE_CDP2 + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream) + { + return __cudaCDP2LaunchDevice(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); + } + + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream) + { + return __cudaCDP2LaunchDeviceV2(parameterBuffer, stream); + } +#else // __CUDA_INTERNAL_USE_CDP2 +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream); +#endif // __CUDA_INTERNAL_USE_CDP2 +#endif // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__) + + +// These symbols should not be used outside of this header file. 
+#define __cudaCDP2DeviceGetAttribute +#define __cudaCDP2DeviceGetLimit +#define __cudaCDP2DeviceGetCacheConfig +#define __cudaCDP2DeviceGetSharedMemConfig +#define __cudaCDP2GetLastError +#define __cudaCDP2PeekAtLastError +#define __cudaCDP2GetErrorString +#define __cudaCDP2GetErrorName +#define __cudaCDP2GetDeviceCount +#define __cudaCDP2GetDevice +#define __cudaCDP2StreamCreateWithFlags +#define __cudaCDP2StreamDestroy +#define __cudaCDP2StreamWaitEvent +#define __cudaCDP2StreamWaitEvent_ptsz +#define __cudaCDP2EventCreateWithFlags +#define __cudaCDP2EventRecord +#define __cudaCDP2EventRecord_ptsz +#define __cudaCDP2EventRecordWithFlags +#define __cudaCDP2EventRecordWithFlags_ptsz +#define __cudaCDP2EventDestroy +#define __cudaCDP2FuncGetAttributes +#define __cudaCDP2Free +#define __cudaCDP2Malloc +#define __cudaCDP2MemcpyAsync +#define __cudaCDP2MemcpyAsync_ptsz +#define __cudaCDP2Memcpy2DAsync +#define __cudaCDP2Memcpy2DAsync_ptsz +#define __cudaCDP2Memcpy3DAsync +#define __cudaCDP2Memcpy3DAsync_ptsz +#define __cudaCDP2MemsetAsync +#define __cudaCDP2MemsetAsync_ptsz +#define __cudaCDP2Memset2DAsync +#define __cudaCDP2Memset2DAsync_ptsz +#define __cudaCDP2Memset3DAsync +#define __cudaCDP2Memset3DAsync_ptsz +#define __cudaCDP2RuntimeGetVersion +#define __cudaCDP2GetParameterBuffer +#define __cudaCDP2GetParameterBufferV2 +#define __cudaCDP2LaunchDevice_ptsz +#define __cudaCDP2LaunchDeviceV2_ptsz +#define __cudaCDP2LaunchDevice +#define __cudaCDP2LaunchDeviceV2 +#define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor +#define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags + +} + +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaMalloc(T **devPtr, size_t size); +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaFuncGetAttributes(struct cudaFuncAttributes *attr, T *entry); +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize); +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize, unsigned int flags); + + +#endif // !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350) +#endif /* defined(__cplusplus) && defined(__CUDACC__) */ + +#undef __DEPRECATED__ +#undef __CDPRT_DEPRECATED +#undef __CUDA_INTERNAL_USE_CDP2 + +#endif /* !__CUDA_DEVICE_RUNTIME_API_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..40ab01b33e0e9bec536192676c2a804809276fc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h @@ -0,0 +1,642 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_EGL_INTEROP_H__) +#define __CUDA_EGL_INTEROP_H__ + +#include "cuda_runtime_api.h" +#include "cuda_runtime.h" +#include "cudart_platform.h" +#include "EGL/egl.h" +#include "EGL/eglext.h" + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** + * \addtogroup CUDART_TYPES + * @{ + */ + + /** + * Maximum number of planes per frame + */ +#define CUDA_EGL_MAX_PLANES 3 + +/** + * CUDA EglFrame type - array or pointer + */ +typedef enum cudaEglFrameType_enum +{ + cudaEglFrameTypeArray = 0, /**< Frame type CUDA array */ + cudaEglFrameTypePitch = 1, /**< Frame type CUDA pointer */ +} cudaEglFrameType; + +/** + * Resource location flags- sysmem or vidmem + * + * For CUDA context on iGPU, since video and system memory are equivalent - + * these flags will not have an effect on the execution. + * + * For CUDA context on dGPU, applications can use the flag ::cudaEglResourceLocationFlags + * to give a hint about the desired location. + * + * ::cudaEglResourceLocationSysmem - the frame data is made resident on the system memory + * to be accessed by CUDA. + * + * ::cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated + * video memory to be accessed by CUDA. + * + * There may be an additional latency due to new allocation and data migration, + * if the frame is produced on a different memory. 
+ */ +typedef enum cudaEglResourceLocationFlags_enum { + cudaEglResourceLocationSysmem = 0x00, /**< Resource location sysmem */ + cudaEglResourceLocationVidmem = 0x01, /**< Resource location vidmem */ +} cudaEglResourceLocationFlags; + +/** + * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. + */ +typedef enum cudaEglColorFormat_enum { + cudaEglColorFormatYUV420Planar = 0, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420SemiPlanar = 1, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */ + cudaEglColorFormatYUV422Planar = 2, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYUV422SemiPlanar = 3, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */ + cudaEglColorFormatARGB = 6, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */ + cudaEglColorFormatRGBA = 7, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */ + cudaEglColorFormatL = 8, /**< single luminance channel in one surface. */ + cudaEglColorFormatR = 9, /**< single color channel in one surface. */ + cudaEglColorFormatYUV444Planar = 10, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYUV444SemiPlanar = 11, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */ + cudaEglColorFormatYUYV422 = 12, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */ + cudaEglColorFormatUYVY422 = 13, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */ + cudaEglColorFormatABGR = 14, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */ + cudaEglColorFormatBGRA = 15, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */ + cudaEglColorFormatA = 16, /**< Alpha color format - one channel in one surface. */ + cudaEglColorFormatRG = 17, /**< R/G color format - two channels in one surface with GR byte ordering */ + cudaEglColorFormatAYUV = 18, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */ + cudaEglColorFormatYVU444SemiPlanar = 19, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422SemiPlanar = 20, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420SemiPlanar = 21, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_444SemiPlanar = 22, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar = 23, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY12V12U12_444SemiPlanar = 24, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY12V12U12_420SemiPlanar = 25, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
*/ + cudaEglColorFormatVYUY_ER = 26, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */ + cudaEglColorFormatUYVY_ER = 27, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */ + cudaEglColorFormatYUYV_ER = 28, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */ + cudaEglColorFormatYVYU_ER = 29, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */ + cudaEglColorFormatYUVA_ER = 31, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */ + cudaEglColorFormatAYUV_ER = 32, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */ + cudaEglColorFormatYUV444Planar_ER = 33, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYUV422Planar_ER = 34, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYUV420Planar_ER = 35, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV444SemiPlanar_ER = 36, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYUV422SemiPlanar_ER = 37, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYUV420SemiPlanar_ER = 38, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU444Planar_ER = 39, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422Planar_ER = 40, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420Planar_ER = 41, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU444SemiPlanar_ER = 42, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422SemiPlanar_ER = 43, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420SemiPlanar_ER = 44, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatBayerRGGB = 45, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */ + cudaEglColorFormatBayerBGGR = 46, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */ + cudaEglColorFormatBayerGRBG = 47, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */ + cudaEglColorFormatBayerGBRG = 48, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */ + cudaEglColorFormatBayer10RGGB = 49, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer10BGGR = 50, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. 
*/ + cudaEglColorFormatBayer10GRBG = 51, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer10GBRG = 52, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer12RGGB = 53, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12BGGR = 54, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12GRBG = 55, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12GBRG = 56, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer14RGGB = 57, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer14BGGR = 58, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer14GRBG = 59, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer14GBRG = 60, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer20RGGB = 61, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatBayer20BGGR = 62, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatBayer20GRBG = 63, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatBayer20GBRG = 64, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatYVU444Planar = 65, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422Planar = 66, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420Planar = 67, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatBayerIspRGGB = 68, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */ + cudaEglColorFormatBayerIspBGGR = 69, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */ + cudaEglColorFormatBayerIspGRBG = 70, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */ + cudaEglColorFormatBayerIspGBRG = 71, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. 
*/ + cudaEglColorFormatBayerBCCR = 72, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */ + cudaEglColorFormatBayerRCCB = 73, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */ + cudaEglColorFormatBayerCRBC = 74, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */ + cudaEglColorFormatBayerCBRC = 75, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */ + cudaEglColorFormatBayer10CCCC = 76, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer12BCCR = 77, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12RCCB = 78, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12CRBC = 79, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12CBRC = 80, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12CCCC = 81, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatY = 82, /**< Color format for single Y plane. */ + cudaEglColorFormatYUV420SemiPlanar_2020 = 83, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420SemiPlanar_2020 = 84, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420Planar_2020 = 85, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420Planar_2020 = 86, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420SemiPlanar_709 = 87, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420SemiPlanar_709 = 88, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420Planar_709 = 89, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420Planar_709 = 90, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatY10V10U10_422SemiPlanar = 94, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. 
*/ + cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatY_ER = 96, /**< Extended Range Color format for single Y plane. */ + cudaEglColorFormatY_709_ER = 97, /**< Extended Range Color format for single Y plane. */ + cudaEglColorFormatY10_ER = 98, /**< Extended Range Color format for single Y10 plane. */ + cudaEglColorFormatY10_709_ER = 99, /**< Extended Range Color format for single Y10 plane. */ + cudaEglColorFormatY12_ER = 100, /**< Extended Range Color format for single Y12 plane. */ + cudaEglColorFormatY12_709_ER = 101, /**< Extended Range Color format for single Y12 plane. */ + cudaEglColorFormatYUVA = 102, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */ + cudaEglColorFormatYVYU = 104, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */ + cudaEglColorFormatVYUY = 105, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ +} cudaEglColorFormat; + +/** + * CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame + */ +typedef struct cudaEglPlaneDesc_st { + unsigned int width; /**< Width of plane */ + unsigned int height; /**< Height of plane */ + unsigned int depth; /**< Depth of plane */ + unsigned int pitch; /**< Pitch of plane */ + unsigned int numChannels; /**< Number of channels for the plane */ + struct cudaChannelFormatDesc channelDesc; /**< Channel Format Descriptor */ + unsigned int reserved[4]; /**< Reserved for future use */ +} cudaEglPlaneDesc; + +/** + * CUDA EGLFrame Descriptor - structure defining one frame of EGL. + * + * Each frame may contain one or more planes depending on whether the surface is Multiplanar or not. 
+ * Each plane of EGLFrame is represented by ::cudaEglPlaneDesc which is defined as: + * \code + * typedef struct cudaEglPlaneDesc_st { + * unsigned int width; + * unsigned int height; + * unsigned int depth; + * unsigned int pitch; + * unsigned int numChannels; + * struct cudaChannelFormatDesc channelDesc; + * unsigned int reserved[4]; + * } cudaEglPlaneDesc; + * \endcode + +*/ +typedef struct cudaEglFrame_st { + union { + cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; /**< Array of CUDA arrays corresponding to each plane*/ + struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/ + } frame; + cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; /**< CUDA EGL Plane Descriptor ::cudaEglPlaneDesc*/ + unsigned int planeCount; /**< Number of planes */ + cudaEglFrameType frameType; /**< Array or Pitch */ + cudaEglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/ +} cudaEglFrame; + +/** + * CUDA EGLSream Connection + */ +typedef struct CUeglStreamConnection_st *cudaEglStreamConnection; + +/** @} */ /* END CUDART_TYPES */ + +/** + * \addtogroup CUDART_EGL EGL Interoperability + * This section describes the EGL interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Registers an EGL image + * + * Registers the EGLImageKHR specified by \p image for access by + * CUDA. A handle to the registered object is returned as \p pCudaResource. + * Additional Mapping/Unmapping is not required for the registered resource and + * ::cudaGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource. + * + * The application will be responsible for synchronizing access to shared objects. + * The application must ensure that any pending operation which access the objects have completed + * before passing control to CUDA. This may be accomplished by issuing and waiting for + * glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). + * The application will be also responsible for ensuring that any pending operation on the + * registered CUDA resource has completed prior to executing subsequent commands in other APIs + * accesing the same memory objects. + * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably). + * + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA + * will not write to this resource. + * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer. + * typedef void* EGLImageKHR + * + * \param pCudaResource - Pointer to the returned object handle + * \param image - An EGLImageKHR image which can be used to create target resource. 
+ * \param flags - Map flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsResourceGetMappedEglFrame, + * ::cuGraphicsEGLRegisterImage + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsEGLRegisterImage(struct cudaGraphicsResource **pCudaResource, EGLImageKHR image, unsigned int flags); + +/** + * \brief Connect CUDA to EGLStream as a consumer. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param eglStream - EGLStreamKHR handle + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerConnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnect(cudaEglStreamConnection *conn, EGLStreamKHR eglStream); + +/** + * \brief Connect CUDA to EGLStream as a consumer with given flags. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by + * ::cudaEglResourceLocationFlags. + * + * The flags specify whether the consumer wants to access frames from system memory or video memory. + * Default is ::cudaEglResourceLocationVidmem. + * + * \param conn - Pointer to the returned connection handle + * \param eglStream - EGLStreamKHR handle + * \param flags - Flags denote intended location - system or video. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerConnectWithFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection *conn, EGLStreamKHR eglStream, unsigned int flags); + +/** + * \brief Disconnect CUDA as a consumer to EGLStream . + * + * Disconnect CUDA as a consumer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerConnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerDisconnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection *conn); + +/** + * \brief Acquire an image frame from the EGLStream with CUDA as a consumer. + * + * Acquire an image frame from EGLStreamKHR. + * ::cudaGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get + * ::cudaEglFrame. + * + * \param conn - Connection on which to acquire + * \param pCudaResource - CUDA resource on which the EGLStream frame will be mapped for use. + * \param pStream - CUDA stream for synchronization and any data migrations + * implied by ::cudaEglResourceLocationFlags. + * \param timeout - Desired timeout in usec. 
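 *
 * A minimal consumer-side sketch for illustration only (not part of the original
 * header); \p eglStream and \p stream are assumed to have been created elsewhere:
 * \code
 * cudaEglStreamConnection conn;
 * cudaGraphicsResource_t  resource;
 * cudaEglFrame            eglFrame;
 * cudaEGLStreamConsumerConnect(&conn, eglStream);
 * if (cudaEGLStreamConsumerAcquireFrame(&conn, &resource, &stream, 16000) == cudaSuccess) {
 *     // Map the acquired frame and inspect its planes via eglFrame.planeDesc[i].
 *     cudaGraphicsResourceGetMappedEglFrame(&eglFrame, resource, 0, 0);
 *     // ... consume eglFrame.frame.pArray[0] or eglFrame.frame.pPitch[0] here ...
 *     cudaEGLStreamConsumerReleaseFrame(&conn, resource, &stream);
 * }
 * cudaEGLStreamConsumerDisconnect(&conn);
 * \endcode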
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * ::cudaErrorLaunchTimeout + * + * \sa + * ::cudaEGLStreamConsumerConnect, + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerAcquireFrame + */ + +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection *conn, + cudaGraphicsResource_t *pCudaResource, cudaStream_t *pStream, unsigned int timeout); +/** + * \brief Releases the last frame acquired from the EGLStream. + * + * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR. + * + * \param conn - Connection on which to release + * \param pCudaResource - CUDA resource whose corresponding frame is to be released + * \param pStream - CUDA stream on which release will be done. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerConnect, + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cuEGLStreamConsumerReleaseFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection *conn, + cudaGraphicsResource_t pCudaResource, cudaStream_t *pStream); + +/** + * \brief Connect CUDA to EGLStream as a producer. + * + * Connect CUDA as a producer to EGLStreamKHR specified by \p stream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param eglStream - EGLStreamKHR handle + * \param width - width of the image to be submitted to the stream + * \param height - height of the image to be submitted to the stream + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerDisconnect, + * ::cudaEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerReturnFrame, + * ::cuEGLStreamProducerConnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerConnect(cudaEglStreamConnection *conn, + EGLStreamKHR eglStream, EGLint width, EGLint height); + +/** + * \brief Disconnect CUDA as a producer to EGLStream . + * + * Disconnect CUDA as a producer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerConnect, + * ::cudaEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerReturnFrame, + * ::cuEGLStreamProducerDisconnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerDisconnect(cudaEglStreamConnection *conn); + +/** + * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer. + * + * The ::cudaEglFrame is defined as: + * \code + * typedef struct cudaEglFrame_st { + * union { + * cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; + * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; + * } frame; + * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; + * unsigned int planeCount; + * cudaEglFrameType frameType; + * cudaEglColorFormat eglColorFormat; + * } cudaEglFrame; + * \endcode + * + * For ::cudaEglFrame of type ::cudaEglFrameTypePitch, the application may present sub-region of a memory + * allocation. In that case, ::cudaPitchedPtr::ptr will specify the start address of the sub-region in + * the allocation and ::cudaEglPlaneDesc will specify the dimensions of the sub-region. 
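 *
 * A minimal producer-side sketch for illustration only (not part of the original
 * header); \p eglStream, \p stream, \p width, \p height and the pitched allocation
 * behind \p frame are assumed to exist, and ::cudaEglColorFormatARGB is just an
 * example format:
 * \code
 * cudaEglStreamConnection conn;
 * cudaEglFrame            frame;
 * cudaEGLStreamProducerConnect(&conn, eglStream, width, height);
 * frame.planeCount     = 1;
 * frame.frameType      = cudaEglFrameTypePitch;
 * frame.eglColorFormat = cudaEglColorFormatARGB;
 * // frame.frame.pPitch[0] and frame.planeDesc[0] describe the memory being sent.
 * cudaEGLStreamProducerPresentFrame(&conn, frame, &stream);
 * // Once the consumer releases it, take the frame back for reuse.
 * cudaEGLStreamProducerReturnFrame(&conn, &frame, &stream);
 * \endcode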
+ * + * \param conn - Connection on which to present the CUDA array + * \param eglframe - CUDA Eglstream Proucer Frame handle to be sent to the consumer over EglStream. + * \param pStream - CUDA stream on which to present the frame. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerConnect, + * ::cudaEGLStreamProducerDisconnect, + * ::cudaEGLStreamProducerReturnFrame, + * ::cuEGLStreamProducerPresentFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection *conn, + cudaEglFrame eglframe, cudaStream_t *pStream); + +/** + * \brief Return the CUDA eglFrame to the EGLStream last released by the consumer. + * + * This API can potentially return cudaErrorLaunchTimeout if the consumer has not + * returned a frame to EGL stream. If timeout is returned the application can retry. + * + * \param conn - Connection on which to present the CUDA array + * \param eglframe - CUDA Eglstream Proucer Frame handle returned from the consumer over EglStream. + * \param pStream - CUDA stream on which to return the frame. + * + * \return + * ::cudaSuccess, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerConnect, + * ::cudaEGLStreamProducerDisconnect, + * ::cudaEGLStreamProducerPresentFrame, + * ::cuEGLStreamProducerReturnFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection *conn, + cudaEglFrame *eglframe, cudaStream_t *pStream); + +/** + * \brief Get an eglFrame through which to access a registered EGL graphics resource. + * + * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource + * \p resource may be accessed. + * This API can only be called for EGL graphics resources. + * + * The ::cudaEglFrame is defined as + * \code + * typedef struct cudaEglFrame_st { + * union { + * cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; + * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; + * } frame; + * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; + * unsigned int planeCount; + * cudaEglFrameType frameType; + * cudaEglColorFormat eglColorFormat; + * } cudaEglFrame; + * \endcode + * + * + * \param eglFrame - Returned eglFrame. + * \param resource - Registered resource to access. + * \param index - Index for cubemap surfaces. + * \param mipLevel - Mipmap level for the subresource to access. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \note Note that in case of multiplanar \p *eglFrame, pitch of only first plane (unsigned int cudaEglPlaneDesc::pitch) is to be considered by the application. + * + * \sa + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsResourceGetMappedEglFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame, + cudaGraphicsResource_t resource, unsigned int index, unsigned int mipLevel); + +/** + * \brief Creates an event from EGLSync object + * + * Creates an event *phEvent from an EGLSyncKHR eglSync with the flages specified + * via \p flags. Valid flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that the created event should use blocking + * synchronization. 
A CPU thread that uses ::cudaEventSynchronize() to wait on + * an event created with this flag will block until the event has actually + * been completed. + * + * ::cudaEventRecord and TimingData are not supported for events created from EGLSync. + * + * The EGLSyncKHR is an opaque handle to an EGL sync object. + * typedef void* EGLSyncKHR + * + * \param phEvent - Returns newly created event + * \param eglSync - Opaque handle to EGLSync object + * \param flags - Event creation flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInitializationError, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * + * \sa + * ::cudaEventQuery, + * ::cudaEventSynchronize, + * ::cudaEventDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaEventCreateFromEGLSync(cudaEvent_t *phEvent, EGLSyncKHR eglSync, unsigned int flags); + +/** @} */ /* END CUDART_EGL */ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* __CUDA_EGL_INTEROP_H__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h new file mode 100644 index 0000000000000000000000000000000000000000..01981ed8108a18ad872f65d8914cab7638af596f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h @@ -0,0 +1,4023 @@ +/* +* Copyright 1993-2021 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 
227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. +* +* Any use of the Licensed Deliverables in individual and commercial +* software must include, in the user documentation and internal +* comments to the code, the above Disclaimer and U.S. Government End +* Users Notice. +*/ + +/** +* \defgroup CUDA_MATH_INTRINSIC_HALF Half Precision Intrinsics +* This section describes half precision intrinsic functions that are +* only supported in device code. +* To use these functions, include the header file \p cuda_fp16.h in your program. +* The following macros are available to help users selectively enable/disable +* various definitions present in the header file: +* - \p CUDA_NO_HALF - If defined, this macro will prevent the definition of +* additional type aliases in the global namespace, helping to avoid potential +* conflicts with symbols defined in the user program. +* - \p __CUDA_NO_HALF_CONVERSIONS__ - If defined, this macro will prevent the +* use of the C++ type conversions (converting constructors and conversion +* operators) that are common for built-in floating-point types, but may be +* undesirable for \p half which is essentially a user-defined type. +* - \p __CUDA_NO_HALF_OPERATORS__ and \p __CUDA_NO_HALF2_OPERATORS__ - If +* defined, these macros will prevent the inadvertent use of usual arithmetic +* and comparison operators. This enforces the storage-only type semantics and +* prevents C++ style computations on \p half and \p half2 types. +*/ + +/** +* \defgroup CUDA_MATH__HALF_ARITHMETIC Half Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_ARITHMETIC Half2 Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_COMPARISON Half Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_COMPARISON Half2 Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_MISC Half Precision Conversion and Data Movement +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_FUNCTIONS Half Math Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_FUNCTIONS Half2 Math Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. 
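*
* For illustration only (not part of the original header), the opt-out macros
* described above take effect when defined before the header is included:
* \code
* #define __CUDA_NO_HALF_CONVERSIONS__   // keep half a storage-only type
* #define __CUDA_NO_HALF_OPERATORS__     // no implicit arithmetic on half
* #define __CUDA_NO_HALF2_OPERATORS__    // no implicit arithmetic on half2
* #include <cuda_fp16.h>
* \endcode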
+*/ + +#ifndef __CUDA_FP16_H__ +#define __CUDA_FP16_H__ + +#define ___CUDA_FP16_STRINGIFY_INNERMOST(x) #x +#define __CUDA_FP16_STRINGIFY(x) ___CUDA_FP16_STRINGIFY_INNERMOST(x) + +#if defined(__cplusplus) +#if defined(__CUDACC__) +#define __CUDA_FP16_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__ +#else +#define __CUDA_HOSTDEVICE_FP16_DECL__ static +#endif /* defined(__CUDACC__) */ + +#define __CUDA_FP16_TYPES_EXIST__ + +/* Forward-declaration of structures defined in "cuda_fp16.hpp" */ + +/** + * \brief half datatype + * + * \details This structure implements the datatype for storing + * half-precision floating-point numbers. The structure implements + * assignment operators and type conversions. + * 16 bits are being used in total: 1 sign bit, 5 bits for the exponent, + * and the significand is being stored in 10 bits. + * The total precision is 11 bits. There are 15361 representable + * numbers within the interval [0.0, 1.0], endpoints included. + * On average we have log10(2**11) ~ 3.311 decimal digits. + * + * \internal + * \req IEEE 754-2008 compliant implementation of half-precision + * floating-point numbers. + * \endinternal + */ +struct __half; + +/** + * \brief half2 datatype + * + * \details This structure implements the datatype for storing two + * half-precision floating-point numbers. + * The structure implements assignment operators and type conversions. + * + * \internal + * \req Vectorified version of half. + * \endinternal + */ +struct __half2; + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts double number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts double number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - double. Is only being read. +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-towards-zero mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-towards-zero mode. +* \param[in] a - float. Is only being read. +* \returns half +* - \p a converted to half. 
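*
* For illustration only (not part of the original header): the same value
* converted under each rounding mode; 0.3f is not exactly representable in
* half, so the rounding mode selects which neighbouring representable value
* is returned:
* \code
* __half hn = __float2half_rn(0.3f);  // round to nearest even
* __half hz = __float2half_rz(0.3f);  // round towards zero
* __half hd = __float2half_rd(0.3f);  // round down (towards -infinity)
* __half hu = __float2half_ru(0.3f);  // round up (towards +infinity)
* float  up = __half2float(hu);       // slightly greater than 0.3f
* \endcode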
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-down mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-down mode. +* \param[in] a - float. Is only being read. +* +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-up mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-up mode. +* \param[in] a - float. Is only being read. +* +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts \p half number to float. +* +* \details Converts half number \p a to float. +* \param[in] a - float. Is only being read. +* +* \returns float +* - \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts input to half precision in round-to-nearest-even mode and +* populates both halves of \p half2 with converted value. +* +* \details Converts input \p a to half precision in round-to-nearest-even mode and +* populates both halves of \p half2 with converted value. +* \param[in] a - float. Is only being read. +* +* \returns half2 +* - The \p half2 value with both halves equal to the converted half +* precision number. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both input floats to half precision in round-to-nearest-even +* mode and returns \p half2 with converted values. +* +* \details Converts both input floats to half precision in round-to-nearest-even mode +* and combines the results into one \p half2 number. Low 16 bits of the return +* value correspond to the input \p a, high 16 bits correspond to the input \p +* b. +* \param[in] a - float. Is only being read. +* \param[in] b - float. Is only being read. +* +* \returns half2 +* - The \p half2 value with corresponding halves equal to the +* converted input floats. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts low 16 bits of \p half2 to float and returns the result +* +* \details Converts low 16 bits of \p half2 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns float +* - The low 16 bits of \p a converted to float. 
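*
* For illustration only (not part of the original header): packing two floats
* and reading each 16-bit half back out:
* \code
* __half2 p  = __floats2half2_rn(1.0f, 2.0f);  // low half = 1.0, high half = 2.0
* float   lo = __low2float(p);                 // 1.0f
* float   hi = __high2float(p);                // 2.0f
* \endcode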
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts high 16 bits of \p half2 to float and returns the result +* +* \details Converts high 16 bits of \p half2 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns float +* - The high 16 bits of \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-towards-zero +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-towards-zero mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. 
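*
* For illustration only (not part of the original header): round-towards-zero
* drops the fractional part, and NaN handling differs between the 16/32-bit and
* 64-bit conversions as documented:
* \code
* int       i = __half2int_rz(__float2half(-1.75f));    // expected -1
* short int s = __half2short_rz(__float2half(3.5f));    // expected 3
* long long n = __half2ll_rz(__float2half(nanf("")));   // 0x8000000000000000
* \endcode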
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-towards-zero +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-towards-zero mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h); + +#if defined(__CUDACC__) +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both components of float2 number to half precision in +* round-to-nearest-even mode and returns \p half2 with converted values. +* +* \details Converts both components of float2 to half precision in round-to-nearest +* mode and combines the results into one \p half2 number. Low 16 bits of the +* return value correspond to \p a.x and high 16 bits of the return value +* correspond to \p a.y. +* \param[in] a - float2. Is only being read. +* +* \returns half2 +* - The \p half2 which has corresponding halves equal to the +* converted float2 components. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both halves of \p half2 to float2 and returns the result. +* +* \details Converts both halves of \p half2 input \p a to float2 and returns the +* result. +* \param[in] a - half2. Is only being read. +* +* \returns float2 +* - \p a converted to float2. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-to-nearest-even mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. 
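*
* For illustration only (not part of the original header), a device-code sketch
* of the three rounding directions applied to 1.5 (kernel name is hypothetical):
* \code
* __global__ void half2int_rounding_demo(int *out)
* {
*     const __half h = __float2half(1.5f);
*     out[0] = __half2int_rn(h);  // expected 2 (ties round to nearest even)
*     out[1] = __half2int_rd(h);  // expected 1 (round down)
*     out[2] = __half2int_ru(h);  // expected 2 (round up)
* }
* \endcode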
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-to-nearest-even mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-towards-zero mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_rz(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-down mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_rd(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-up mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_ru(const int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. 
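*
* For illustration only (not part of the original header), relating to the
* integer-to-half conversions documented above: integers above 2048 are no
* longer all exactly representable in half, so the rounding mode chooses the
* neighbouring representable value (device code for the _rd/_ru variants):
* \code
* __half a = __int2half_rd(2049);  // expected 2048
* __half b = __int2half_ru(2049);  // expected 2050
* __half c = __int2half_rn(2049);  // expected 2048 (tie rounds to even)
* \endcode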
+* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-towards-zero mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_rz(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-down mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_rd(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-up mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_ru(const short int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-to-nearest-even mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-up mode. 
+* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-to-nearest-even mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-towards-zero mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-down mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-up mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. 
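*
* For illustration only (not part of the original header): NaN maps to 0 for
* the short/int unsigned conversions, unlike the 64-bit conversions documented
* elsewhere in this header, which return 0x8000000000000000:
* \code
* unsigned short u = __half2ushort_rz(__float2half(nanf("")));  // 0
* unsigned int   v = __half2uint_rz(__float2half(nanf("")));    // 0
* \endcode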
+*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-towards-zero +* mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-down mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-up mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-to-nearest-even mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-down mode. 
+* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-down mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-up mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-towards-zero +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-down mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-up mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-to-nearest-even mode. 
NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-down mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-up mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-towards-zero mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +*/ +__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-down mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-up mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. 
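*
* For illustration only (not part of the original header): large 64-bit values
* lose precision in half (the spacing between representable values is 32 in the
* range [32768, 65536)), so the rounding mode picks the neighbouring value
* (device code for the _rd/_ru variants):
* \code
* __half lo = __ll2half_rd(33333LL);  // expected 33312
* __half hi = __ll2half_ru(33333LL);  // expected 33344
* \endcode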
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i); + +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Truncate input argument to the integral part. +* +* \details Round \p h to the nearest integer value that does not exceed \p h in +* magnitude. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The truncated integer value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half htrunc(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculate ceiling of the input argument. +* +* \details Compute the smallest integer value not less than \p h. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The smallest integer value not less than \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hceil(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculate the largest integer less than or equal to \p h. +* +* \details Calculate the largest integer value which is less than or equal to \p h. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The largest integer value which is less than or equal to \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hfloor(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Round input to nearest integer value in half-precision floating-point +* number. +* +* \details Round \p h to the nearest integer value in half-precision floating-point +* format, with halfway cases rounded to the nearest even integer value. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The nearest integer to \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrint(const __half h); + +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Truncate \p half2 vector input argument to the integral part. +* +* \details Round each component of vector \p h to the nearest integer value that does +* not exceed \p h in magnitude. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The truncated \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculate \p half2 vector ceiling of the input argument. +* +* \details For each component of vector \p h compute the smallest integer value not less +* than \p h. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The vector of smallest integers not less than \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculate the largest integer less than or equal to \p h. +* +* \details For each component of vector \p h calculate the largest integer value which +* is less than or equal to \p h. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The vector of largest integers which is less than or equal to \p h. 
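*
* For illustration only (not part of the original header), a device-code sketch
* of the scalar rounding helpers documented above applied to -1.5:
* \code
* __half x = __float2half(-1.5f);
* __half t = htrunc(x);  // expected -1.0 (towards zero)
* __half c = hceil(x);   // expected -1.0
* __half f = hfloor(x);  // expected -2.0
* __half r = hrint(x);   // expected -2.0 (ties round to nearest even)
* \endcode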
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Round input to nearest integer value in half-precision floating-point +* number. +* +* \details Round each component of \p half2 vector \p h to the nearest integer value in +* half-precision floating-point format, with halfway cases rounded to the +* nearest even integer value. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The vector of rounded integer values. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns \p half2 with both halves equal to the input value. +* +* \details Returns \p half2 number with both halves equal to the input \p a \p half +* number. +* \param[in] a - half. Is only being read. +* +* \returns half2 +* - The vector which has both its halves equal to the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __half2half2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Swaps both halves of the \p half2 input. +* +* \details Swaps both halves of the \p half2 input and returns a new \p half2 number +* with swapped halves. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - \p a with its halves being swapped. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts low 16 bits from each of the two \p half2 inputs and combines +* into one \p half2 number. +* +* \details Extracts low 16 bits from each of the two \p half2 inputs and combines into +* one \p half2 number. Low 16 bits from input \p a is stored in low 16 bits of +* the return value, low 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The low 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts high 16 bits from each of the two \p half2 inputs and +* combines into one \p half2 number. +* +* \details Extracts high 16 bits from each of the two \p half2 inputs and combines into +* one \p half2 number. High 16 bits from input \p a is stored in low 16 bits of +* the return value, high 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The high 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns high 16 bits of \p half2 input. +* +* \details Returns high 16 bits of \p half2 input \p a. +* \param[in] a - half2. Is only being read. +* +* \returns half +* - The high 16 bits of the input. 
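+*
+* A brief illustrative sketch of reshuffling packed values with the helpers in
+* this group (the function name and the de-interleaving use case are
+* assumptions for the example):
+* \code
+* __device__ void split_pairs(const __half2 a, const __half2 b,
+*                             __half2 *lows, __half2 *highs)
+* {
+*     // Gather the two low halves into one vector and the two high halves
+*     // into another, e.g. to de-interleave packed (x, y) pairs.
+*     *lows  = __lows2half2(a, b);
+*     *highs = __highs2half2(a, b);
+* }
+* \endcode
+*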
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __high2half(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns low 16 bits of \p half2 input. +* +* \details Returns low 16 bits of \p half2 input \p a. +* \param[in] a - half2. Is only being read. +* +* \returns half +* - Returns \p half which contains low 16 bits of the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __low2half(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Checks if the input \p half number is infinite. +* +* \details Checks if the input \p half number \p a is infinite. +* \param[in] a - half. Is only being read. +* +* \returns int +* - -1 iff \p a is equal to negative infinity, +* - 1 iff \p a is equal to positive infinity, +* - 0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __hisinf(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Combines two \p half numbers into one \p half2 number. +* +* \details Combines two input \p half number \p a and \p b into one \p half2 number. +* Input \p a is stored in low 16 bits of the return value, input \p b is stored +* in high 16 bits of the return value. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half2 +* - The half2 with one half equal to \p a and the other to \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts low 16 bits from \p half2 input. +* +* \details Extracts low 16 bits from \p half2 input \p a and returns a new \p half2 +* number which has both halves equal to the extracted bits. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The half2 with both halves equal to the low 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts high 16 bits from \p half2 input. +* +* \details Extracts high 16 bits from \p half2 input \p a and returns a new \p half2 +* number which has both halves equal to the extracted bits. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The half2 with both halves equal to the high 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a \p half as a signed short integer. +* +* \details Reinterprets the bits in the half-precision floating-point number \p h +* as a signed short integer. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half_as_short(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a \p half as an unsigned short integer. 
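+*
+* A minimal illustrative sketch of working directly on the raw encoding (the
+* helper name is an assumption for this example; 0x8000 is the sign-bit mask
+* of the 16-bit half format):
+* \code
+* __device__ __half flip_sign(const __half h)
+* {
+*     // Toggle the sign bit of the 16-bit encoding and reinterpret the
+*     // result back as a half.
+*     unsigned short bits = __half_as_ushort(h);
+*     return __ushort_as_half((unsigned short)(bits ^ 0x8000U));
+* }
+* \endcode
+*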
+* +* \details Reinterprets the bits in the half-precision floating-point \p h +* as an unsigned short number. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a signed short integer as a \p half. +* +* \details Reinterprets the bits in the signed short integer \p i as a +* half-precision floating-point number. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short_as_half(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in an unsigned short integer as a \p half. +* +* \details Reinterprets the bits in the unsigned short integer \p i as a +* half-precision floating-point number. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half maximum of two input values. +* +* \details Calculates \p half max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half minimum of two input values. +* +* \details Calculates \p half min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector maximum of two inputs. +* +* \details Calculates \p half2 vector max(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
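+*
+* As an illustrative sketch, a packed clamp built from the vector maximum and
+* minimum (the helper name is an assumption; NaN handling follows the rules
+* listed above):
+* \code
+* __device__ __half2 clamp_h2(const __half2 x, const __half2 lo, const __half2 hi)
+* {
+*     // Elementwise clamp of both halves of x into [lo, hi].
+*     return __hmin2(__hmax2(x, lo), hi);
+* }
+* \endcode
+*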
+* +* \returns half2 +* - The result of elementwise maximum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector minimum of two inputs. +* +* \details Calculates \p half2 vector min(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise minimum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b); + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +#define __WSB_DEPRECATION_MESSAGE(x) __CUDA_FP16_STRINGIFY(x) "() is deprecated in favor of " __CUDA_FP16_STRINGIFY(x) "_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." + +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) __half2 __shfl(const __half2 var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down))__half2 __shfl_down(const __half2 var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) __half2 __shfl_xor(const __half2 var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) __half __shfl(const __half var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) __half __shfl_up(const __half var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) __half __shfl_down(const __half var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) __half __shfl_xor(const __half var, const int delta, const int width = warpSize); +#endif + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. 
If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. 
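+*
+* An illustrative warp-level reduction sketch (the function name is an
+* assumption; all 32 lanes are assumed to participate, hence the full
+* 0xFFFFFFFF mask, and __hadd2() declared further below is assumed available
+* on the target device):
+* \code
+* __device__ __half2 warp_sum_h2(__half2 v)
+* {
+*     // Halve the stride each step; after five steps lane 0 holds the sum of
+*     // the low halves and the sum of the high halves of all 32 lanes.
+*     for (int offset = 16; offset > 0; offset >>= 1) {
+*         v = __hadd2(v, __shfl_down_sync(0xFFFFFFFFU, v, (unsigned int)offset));
+*     }
+*     return v;  // meaningful in lane 0
+* }
+* \endcode
+*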
+* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. 
The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. 
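+*
+* A small illustrative sketch of the butterfly pattern described above (the
+* function name is an assumption; all 32 lanes are assumed active):
+* \code
+* __device__ __half warp_max_h(__half v)
+* {
+*     // XOR-based butterfly: after five rounds every lane holds the
+*     // warp-wide maximum, using __hmax() declared earlier in this header.
+*     for (int laneMask = 16; laneMask > 0; laneMask >>= 1) {
+*         v = __hmax(v, __shfl_xor_sync(0xFFFFFFFFU, v, laneMask));
+*     }
+*     return v;
+* }
+* \endcode
+*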
+* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width = warpSize); + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif +#endif /*!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) */ + +#if defined(__cplusplus) && ( !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) ) +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wb` store instruction. 
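+*
+* As an illustrative sketch of pairing the cache-hint loads and stores in this
+* block (the kernel name and the streaming-copy use case are assumptions):
+* \code
+* __global__ void stream_copy_h2(const __half2 *src, __half2 *dst, int n)
+* {
+*     int idx = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (idx < n) {
+*         // Streaming load and store: hints that the data is not reused,
+*         // so it should not displace other cached lines.
+*         __stcs(dst + idx, __ldcs(src + idx));
+*     }
+* }
+* \endcode
+*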
+* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wb` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value); +#endif /*defined(__cplusplus) && ( !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) )*/ + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs half2 vector if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison. 
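+*
+* An illustrative sketch of using these 1.0/0.0 comparison results as
+* multiplicative masks (the helper name is an assumption; __float2half2_rn()
+* and __hmul2() are declared elsewhere in this header):
+* \code
+* __device__ __half2 relu_h2(const __half2 x)
+* {
+*     // __hgt2() yields 1.0 where x > 0 and 0.0 elsewhere (0.0 for NaN),
+*     // so the multiply zeroes out the non-positive components; NaN inputs
+*     // propagate as NaN through the multiplication.
+*     const __half2 zero = __float2half2_rn(0.0f);
+*     return __hmul2(x, __hgt2(x, zero));
+* }
+* \endcode
+*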
+* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The \p half2 result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The half2 vector result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. 
+* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison. +* +* Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The \p half2 vector result of unordered greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The \p half2 vector result of unordered greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs half2 vector if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. 
+* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __heq2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hne2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hle2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hge2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hlt2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. 
+* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hgt2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hequ2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hneu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison. +* +* Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hleu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hgeu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison. 
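+*
+* An illustrative sketch of consuming the 0xFFFF-per-half masks produced by
+* the *_mask comparisons (the helper name is an assumption; __popc() is the
+* standard population-count device intrinsic):
+* \code
+* __device__ int count_greater_h2(const __half2 a, const __half2 b)
+* {
+*     // Each true half contributes 16 set bits to the mask, so the
+*     // population count divided by 16 is the number of true components.
+*     unsigned mask = __hgt2_mask(a, b);
+*     return __popc(mask) / 16;
+* }
+* \endcode
+*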
+* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hltu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hgtu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Determine whether \p half2 argument is a NaN. +* +* \details Determine whether each half of input \p half2 number \p a is a NaN. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The half2 with the corresponding \p half results set to +* 1.0 for NaN, 0.0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The sum of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-104 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The subtraction of vector \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-102 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
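+*
+* A brief illustrative sketch chaining the packed arithmetic above (the helper
+* name and the affine-transform use case are assumptions for the example):
+* \code
+* __device__ __half2 scale_shift_h2(const __half2 x, const __half2 a, const __half2 b)
+* {
+*     // a * x + b with two roundings; __hfma2() further below performs the
+*     // same computation with a single rounding.
+*     return __hadd2(__hmul2(a, x), b);
+* }
+* \endcode
+*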
+* +* \returns half2 +* - The result of elementwise multiplying the vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode. Prevents floating-point contractions of mul+add into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The sum of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2_rn(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode. Prevents floating-point contractions of mul+sub +* into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-104 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The subtraction of vector \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2_rn(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode. Prevents floating-point contractions of +* mul+add or sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-102 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise multiplying the vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2_rn(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector division in round-to-nearest-even mode. +* +* \details Divides \p half2 input vector \p a by input vector \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-103 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The elementwise division of \p a with \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Calculates the absolute value of both halves of the input \p half2 number and +* returns the result. +* +* \details Calculates the absolute value of both halves of the input \p half2 number and +* returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - Returns \p a with the absolute value of both halves. 
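+*
+* As an illustrative sketch, a packed relative-difference helper built from
+* the absolute-value, subtraction, and division operations in this group (the
+* function name is an assumption; \p ref is assumed to be nonzero):
+* \code
+* __device__ __half2 rel_diff_h2(const __half2 x, const __half2 ref)
+* {
+*     // |x - ref| / |ref|, computed elementwise on both halves.
+*     return __h2div(__habs2(__hsub2(x, ref)), __habs2(ref));
+* }
+* \endcode
+*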
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the results to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The subtraction of vector \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise multiplication of vectors \p a and \p b, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode. +* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-105 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode, with saturation to [0.0, 1.0]. 
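+*
+* An illustrative sketch of a saturated blend (the helper name is an
+* assumption; \p t, \p x, and \p y are assumed to lie in [0.0, 1.0], and
+* __float2half2_rn() is declared elsewhere in this header):
+* \code
+* __device__ __half2 blend_h2(const __half2 t, const __half2 x, const __half2 y)
+* {
+*     // t * x + (1 - t) * y; the product (1 - t) * y and the final result
+*     // are clamped to [0.0, 1.0].
+*     const __half2 one = __float2half2_rn(1.0f);
+*     return __hfma2_sat(t, x, __hmul2_sat(__hsub2(one, t), y));
+* }
+* \endcode
+*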
+* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the +* results to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Negates both halves of the input \p half2 number and returns the +* result. +* +* \details Negates both halves of the input \p half2 number \p a and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-101 +* \endinternal +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - Returns \p a with both halves negated. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Calculates the absolute value of input \p half number and returns the result. +* +* \details Calculates the absolute value of input \p half number and returns the result. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The absolute value of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __habs(const __half a); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode. +* +* \details Performs \p half addition of inputs \p a and \p b, in round-to-nearest-even +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of multiplying \p a and \p b. 
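+*
+* A minimal illustrative sketch of the scalar arithmetic above (the helper
+* name is an assumption; __float2half() is declared elsewhere in this header):
+* \code
+* __device__ __half mean_h(const __half a, const __half b)
+* {
+*     // (a + b) * 0.5, with round-to-nearest-even applied at each step.
+*     return __hmul(__hadd(a, b), __float2half(0.5f));
+* }
+* \endcode
+*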
+*/ +__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode. +* +* \details Performs \p half addition of inputs \p a and \p b, in round-to-nearest-even +* mode. Prevents floating-point contractions of mul+add into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd_rn(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode. Prevents floating-point contractions of mul+sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub_rn(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode. Prevents floating-point contractions of mul+add or sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of multiplying \p a and \p b. +*/ +__CUDA_FP16_DECL__ __half __hmul_rn(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half division in round-to-nearest-even mode. +* +* \details Divides \p half input \p a by input \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-98 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of dividing \p a by \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half add of inputs \p a and \p b, in round-to-nearest-even mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. 
+* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of subtraction of \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the result to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of multiplying \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-96 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the result +* to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Negates input \p half number and returns the result. +* +* \details Negates input \p half number and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-100 +* \endinternal +* \param[in] a - half. Is only being read. +* +* \returns half +* - minus a +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hneg(const __half a); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector if-equal comparison and returns boolean true +* iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half if-equal comparisons +* evaluate to true, or false otherwise. 
+* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of if-equal comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of not-equal comparison +* of vectors \p a and \p b are true, +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of less-equal comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of greater-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
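+*
+* For illustration, a sketch of how the all-lanes semantics can gate an update;
+* the kernel and the limit operand below are assumed for the example:
+* \code
+* #include <cuda_fp16.h>
+*
+* // Illustrative kernel: copy a pair only when BOTH halves of v[i] are
+* // strictly below the corresponding halves of limit.
+* __global__ void copy_if_below(const __half2 *v, const __half2 limit,
+*                               __half2 *out, int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n && __hblt2(v[i], limit)) {
+*         out[i] = v[i];
+*     }
+* }
+* \endcode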
+* +* \returns bool +* - true if both \p half results of less-than comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of greater-than +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half if-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered if-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered not-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
+* +* \returns bool +* - true if both \p half results of unordered less-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison and +* returns boolean true iff both \p half results are true, boolean false +* otherwise. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered +* greater-equal comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered less-than comparison of +* vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison and +* returns boolean true iff both \p half results are true, boolean false +* otherwise. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered +* greater-than comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half if-equal comparison. +* +* \details Performs \p half if-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of if-equal comparison of \p a and \p b. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half not-equal comparison. +* +* \details Performs \p half not-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of not-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half less-equal comparison. +* +* \details Performs \p half less-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of less-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half greater-equal comparison. +* +* \details Performs \p half greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of greater-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half less-than comparison. +* +* \details Performs \p half less-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of less-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half greater-than comparison. +* +* \details Performs \p half greater-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of greater-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered if-equal comparison. +* +* \details Performs \p half if-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered if-equal comparison of \p a and +* \p b. 
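+*
+* For illustration, a small sketch of the ordered/unordered distinction; the
+* device helper below is assumed for the example:
+* \code
+* #include <cuda_fp16.h>
+*
+* // Illustrative helper: returns true exactly when at least one input is NaN,
+* // because __heq is false on NaN inputs while __hequ is true.
+* __device__ bool involves_nan(const __half a, const __half b)
+* {
+*     return __heq(a, b) != __hequ(a, b);
+* }
+* \endcode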
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered not-equal comparison. +* +* \details Performs \p half not-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered not-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered less-equal comparison. +* +* \details Performs \p half less-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered less-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered greater-equal comparison. +* +* \details Performs \p half greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered greater-equal comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered less-than comparison. +* +* \details Performs \p half less-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered less-than comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered greater-than comparison. +* +* \details Performs \p half greater-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered greater-than comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Determine whether \p half argument is a NaN. +* +* \details Determine whether \p half value \p a is a NaN. +* \param[in] a - half. Is only being read. +* +* \returns bool +* - true iff argument is NaN. 
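+*
+* For illustration, a minimal sketch; the kernel name below is assumed for the
+* example:
+* \code
+* #include <cuda_fp16.h>
+*
+* // Illustrative kernel: replace NaNs with +0.0 before a later reduction
+* // (device code requires compute capability >= 5.3).
+* __global__ void scrub_nans(__half *data, int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n && __hisnan(data[i])) {
+*         data[i] = __float2half(0.0f);
+*     }
+* }
+* \endcode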
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hisnan(const __half a); +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half maximum of two input values, NaNs pass through. +* +* \details Calculates \p half max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half minimum of two input values, NaNs pass through. +* +* \details Calculates \p half min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode with relu saturation. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector maximum of two inputs, NaNs pass through. +* +* \details Calculates \p half2 vector max(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise maximum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector minimum of two inputs, NaNs pass through. +* +* \details Calculates \p half2 vector min(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. 
Is only being read. +* +* \returns half2 +* - The result of elementwise minimum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode with relu saturation. +* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c); +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) */ +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs fast complex multiply-accumulate +* +* \details Interprets vector \p half2 input pairs \p a, \p b, and \p c as +* complex numbers in \p half precision and performs +* complex multiply-accumulate operation: a*b + c +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of complex multiply-accumulate operation on complex numbers \p a, \p b, and \p c +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half square root in round-to-nearest-even mode. +* +* \details Calculates \p half square root of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hsqrt(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half reciprocal square root in round-to-nearest-even +* mode. +* +* \details Calculates \p half reciprocal square root of input \p a in round-to-nearest +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The reciprocal square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrsqrt(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p half reciprocal of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The reciprocal of \p a. 
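+*
+* For illustration, a minimal sketch; the kernel and the denom operand below
+* are assumed for the example:
+* \code
+* #include <cuda_fp16.h>
+*
+* // Illustrative kernel: scale each element by the reciprocal of a common
+* // denominator computed once per thread with hrcp.
+* __global__ void scale_by_reciprocal(__half *data, const __half denom, int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     const __half inv = hrcp(denom);
+*     if (i < n) {
+*         data[i] = __hmul(data[i], inv);
+*     }
+* }
+* \endcode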
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrcp(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half natural logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half natural logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The natural logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half binary logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half binary logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The binary logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half decimal logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half decimal logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The decimal logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog10(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half natural exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half natural exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The natural exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half binary exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half binary exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The binary exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half decimal exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half decimal exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The decimal exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp10(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half cosine in round-to-nearest-even mode. +* +* \details Calculates \p half cosine of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The cosine of \p a. 
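+*
+* For illustration, a minimal sketch; the kernel and the phase operand below
+* are assumed for the example:
+* \code
+* #include <cuda_fp16.h>
+*
+* // Illustrative kernel: sample cos(x + phase) for a table of half-precision
+* // angles.
+* __global__ void cos_table(const __half *x, const __half phase, __half *out, int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n) {
+*         out[i] = hcos(__hadd(x[i], phase));
+*     }
+* }
+* \endcode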
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hcos(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half sine in round-to-nearest-even mode. +* +* \details Calculates \p half sine of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The sine of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hsin(const __half a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector square root in round-to-nearest-even mode. +* +* \details Calculates \p half2 square root of input vector \p a in round-to-nearest +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector reciprocal square root in round-to-nearest +* mode. +* +* \details Calculates \p half2 reciprocal square root of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise reciprocal square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p half2 reciprocal of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise reciprocal on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector natural logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 natural logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise natural logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector binary logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 binary logarithm of input vector \p a in round-to-nearest +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise binary logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector decimal logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 decimal logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise decimal logarithm on vector \p a. 
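+*
+* For illustration, a minimal sketch of the packed-pair usage; the kernel below
+* is assumed for the example, and n counts __half2 pairs:
+* \code
+* #include <cuda_fp16.h>
+*
+* // Illustrative kernel: convert pairs of linear magnitudes to decibels,
+* // 20 * log10(x), processing two __half lanes per element.
+* __global__ void to_decibels(const __half2 *x, __half2 *out, int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n) {
+*         out[i] = __hmul2(h2log10(x[i]), __float2half2_rn(20.0f));
+*     }
+* }
+* \endcode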
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half2 exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector binary exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p half2 binary exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise binary exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector decimal exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p half2 decimal exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise decimal exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector cosine in round-to-nearest-even mode. +* +* \details Calculates \p half2 cosine of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise cosine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector sine in round-to-nearest-even mode. +* +* \details Calculates \p half2 sine of input vector \p a in round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise sine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a); + +#endif /*if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)*/ + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600) + +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Vector add \p val to the value stored at \p address in global or shared memory, and writes this +* value back to \p address. The atomicity of the add operation is guaranteed separately for each of the +* two __half elements; the entire __half2 is not guaranteed to be atomic as a single 32-bit access. +* +* \details The location of \p address must be in global or shared memory. This operation has undefined +* behavior otherwise. This operation is only supported by devices of compute capability 6.x and higher. +* +* \param[in] address - half2*. An address in global or shared memory. 
+* \param[in] val - half2. The value to be added. +* +* \returns half2 +* - The old value read from \p address. +* +* \note_ref_guide_atomic +*/ +__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val); + +#endif /*if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)*/ + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) + +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Adds \p val to the value stored at \p address in global or shared memory, and writes this value +* back to \p address. This operation is performed in one atomic operation. +* +* \details The location of \p address must be in global or shared memory. This operation has undefined +* behavior otherwise. This operation is only supported by devices of compute capability 7.x and higher. +* +* \param[in] address - half*. An address in global or shared memory. +* \param[in] val - half. The value to be added. +* +* \returns half +* - The old value read from \p address. +* +* \note_ref_guide_atomic +*/ +__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val); + +#endif /*if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)*/ + +#endif /* defined(__CUDACC__) */ + +#undef __CUDA_FP16_DECL__ +#undef __CUDA_HOSTDEVICE_FP16_DECL__ + +#endif /* defined(__cplusplus) */ + +/* Note the .hpp file is included even for host-side compilation, to capture the "half" & "half2" definitions */ +#include "cuda_fp16.hpp" +#undef ___CUDA_FP16_STRINGIFY_INNERMOST +#undef __CUDA_FP16_STRINGIFY + +#endif /* end of include guard: __CUDA_FP16_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp new file mode 100644 index 0000000000000000000000000000000000000000..22fb0c025e6086938f308a51090a239237a50c9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp @@ -0,0 +1,2738 @@ +/* +* Copyright 1993-2022 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+* OF THESE LICENSED DELIVERABLES.
+*
+* U.S. Government End Users. These Licensed Deliverables are a
+* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+* 1995), consisting of "commercial computer software" and "commercial
+* computer software documentation" as such terms are used in 48
+* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+* U.S. Government End Users acquire the Licensed Deliverables with
+* only those rights set forth herein.
+*
+* Any use of the Licensed Deliverables in individual and commercial
+* software must include, in the user documentation and internal
+* comments to the code, the above Disclaimer and U.S. Government End
+* Users Notice.
+*/
+
+#if !defined(__CUDA_FP16_HPP__)
+#define __CUDA_FP16_HPP__
+
+#if !defined(__CUDA_FP16_H__)
+#error "Do not include this file directly. Instead, include cuda_fp16.h."
+#endif
+
+#if !defined(_MSC_VER) && __cplusplus >= 201103L
+# define __CPP_VERSION_AT_LEAST_11_FP16
+#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
+# define __CPP_VERSION_AT_LEAST_11_FP16
+#endif
+
+// implicitly provided by NVRTC
+#if !defined(__CUDACC_RTC__)
+#include <nv/target>
+#endif /* !defined(__CUDACC_RTC__) */
+
+
+#if !defined(IF_DEVICE_OR_CUDACC)
+#if defined(__CUDACC__)
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, c)
+#else
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, f)
+#endif
+#endif
+/* C++11 header for std::move.
+ * In RTC mode, std::move is provided implicitly; don't include the header
+ */
+#if defined(__CPP_VERSION_AT_LEAST_11_FP16) && !defined(__CUDACC_RTC__)
+#include <utility>
+#endif /* __cplusplus >= 201103L && !defined(__CUDACC_RTC__) */
+
+/* C++ header for std::memcpy (used for type punning in host-side implementations).
+ * When compiling as a CUDA source file memcpy is provided implicitly.
+ * !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
+ */
+#if defined(__cplusplus) && !defined(__CUDACC__)
+#include <cstring>
+#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
+
+
+/* Set up function decorations */
+#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
+#define __CUDA_FP16_DECL__ static __device__ __inline__
+#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__
+#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
+#define __CUDA_HOSTDEVICE__ __host__ __device__
+#else /* !defined(__CUDACC__) */
+#if defined(__GNUC__)
+#define __CUDA_HOSTDEVICE_FP16_DECL__ static __attribute__ ((unused))
+#else
+#define __CUDA_HOSTDEVICE_FP16_DECL__ static
+#endif /* defined(__GNUC__) */
+#define __CUDA_HOSTDEVICE__
+#endif /* defined(__CUDACC_) */
+
+/* Set up structure-alignment attribute */
+#if defined(__CUDACC__)
+#define __CUDA_ALIGN__(align) __align__(align)
+#else
+/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */
+#if __cplusplus >= 201103L
+#define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */
+#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP16)*/
+#if defined(__GNUC__)
+#define __CUDA_ALIGN__(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+#define __CUDA_ALIGN__(n) __declspec(align(n))
+#else
+#define __CUDA_ALIGN__(n)
+#endif /* defined(__GNUC__) */
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
+#endif /* defined(__CUDACC__) */
+
+/* Macros to allow half & half2 to be used by inline assembly */
+#define __HALF_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
+#define __HALF_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
+#define __HALF2_TO_UI(var) *(reinterpret_cast<unsigned int *>(&(var)))
+#define __HALF2_TO_CUI(var) *(reinterpret_cast<const unsigned int *>(&(var)))
+
+/* Macros for half & half2 binary arithmetic */
+#define __BINARY_OP_HALF_MACRO(name) /* do */ {\
+   __half val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16 %0,%1,%2;\n}" \
+        :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b))); \
+   return val; \
+} /* while(0) */
+#define __BINARY_OP_HALF2_MACRO(name) /* do */ {\
+   __half2 val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16x2 %0,%1,%2;\n}" \
+        :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
+   return val; \
+} /* while(0) */
+#define __TERNARY_OP_HALF_MACRO(name) /* do */ {\
+   __half val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16 %0,%1,%2,%3;\n}" \
+        :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b)),"h"(__HALF_TO_CUS(c))); \
+   return val; \
+} /* while(0) */
+#define __TERNARY_OP_HALF2_MACRO(name) /* do */ {\
+   __half2 val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16x2 %0,%1,%2,%3;\n}" \
+        :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b)),"r"(__HALF2_TO_CUI(c))); \
+   return val; \
+} /* while(0) */
+
+/**
+* Types which allow static initialization of "half" and "half2" until
+* these become an actual builtin. Note this initialization is as a
+* bitfield representation of "half", and not a conversion from short->half.
+* Such a representation will be deprecated in a future version of CUDA.
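+* For illustration, the bit pattern 0x3C00U encodes half(1.0f), so raw static
+* initializers of this kind (names assumed for the example) look like:
+*     static const __half_raw  kOneRaw = { 0x3C00U };
+*     static const __half2_raw kOnes2  = { 0x3C00U, 0x3C00U };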
+* (Note these are visible to non-nvcc compilers, including C-only compilation) +*/ +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __half_raw; + +typedef struct __CUDA_ALIGN__(4) { + unsigned short x; + unsigned short y; +} __half2_raw; + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/* Hide GCC member initialization list warnings because of host/device in-function init requirement */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic ignored "-Weffc++" +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +/* class' : multiple assignment operators specified + The class has multiple assignment operators of a single type. This warning is informational */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( push ) +#pragma warning( disable:4522 ) +#endif /* defined(__GNUC__) */ + +struct __CUDA_ALIGN__(2) __half { +protected: + unsigned short __x; + +public: +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) + __half() = default; +#else + __CUDA_HOSTDEVICE__ __half() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + + /* Convert to/from __half_raw */ + __CUDA_HOSTDEVICE__ __half(const __half_raw &hr) : __x(hr.x) { } + __CUDA_HOSTDEVICE__ __half &operator=(const __half_raw &hr) { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __half &operator=(const __half_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __half &operator=(const volatile __half_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ operator __half_raw() const { __half_raw ret; ret.x = __x; return ret; } + __CUDA_HOSTDEVICE__ operator __half_raw() const volatile { __half_raw ret; ret.x = __x; return ret; } + +#if !defined(__CUDA_NO_HALF_CONVERSIONS__) + + /* Construct from float/double */ + __CUDA_HOSTDEVICE__ __half(const float f) { __x = __float2half(f).__x; } + __CUDA_HOSTDEVICE__ __half(const double f) { __x = __double2half(f).__x; } + + __CUDA_HOSTDEVICE__ operator float() const { return __half2float(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const float f) { __x = __float2half(f).__x; return *this; } + + /* We omit "cast to double" operator, so as to not be ambiguous about up-cast */ + __CUDA_HOSTDEVICE__ __half &operator=(const double f) { __x = __double2half(f).__x; return *this; } + +/* Member functions only available to nvcc compilation so far */ +#if defined(__CUDACC__) + /* Allow automatic construction from types supported natively in hardware */ + /* Note we do avoid constructor init-list because of special host/device compilation rules */ + __CUDA_HOSTDEVICE__ __half(const short val) { __x = __short2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned short val) { __x = __ushort2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const int val) { __x = __int2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned int val) { __x = __uint2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const long long val) { __x = __ll2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned long long val) { __x = __ull2half_rn(val).__x; } + + /* Allow automatic casts to supported builtin types, matching all that are permitted with float */ + __CUDA_HOSTDEVICE__ operator short() const { return __half2short_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const short val) { __x = 
__short2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned short() const { return __half2ushort_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned short val) { __x = __ushort2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator int() const { return __half2int_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const int val) { __x = __int2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned int() const { return __half2uint_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned int val) { __x = __uint2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator long long() const { return __half2ll_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const long long val) { __x = __ll2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned long long() const { return __half2ull_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned long long val) { __x = __ull2half_rn(val).__x; return *this; } + + /* Boolean conversion - note both 0 and -0 must return false */ + __CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFFU) != 0U; } +#endif /* defined(__CUDACC__) */ +#endif /* !defined(__CUDA_NO_HALF_CONVERSIONS__) */ +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +/* Arithmetic FP16 operations only supported on arch >= 5.3 */ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) +#if !defined(__CUDA_NO_HALF_OPERATORS__) +/* Some basic arithmetic operations expected of a builtin */ +__device__ __forceinline__ __half operator+(const __half &lh, const __half &rh) { return __hadd(lh, rh); } +__device__ __forceinline__ __half operator-(const __half &lh, const __half &rh) { return __hsub(lh, rh); } +__device__ __forceinline__ __half operator*(const __half &lh, const __half &rh) { return __hmul(lh, rh); } +__device__ __forceinline__ __half operator/(const __half &lh, const __half &rh) { return __hdiv(lh, rh); } + +__device__ __forceinline__ __half &operator+=(__half &lh, const __half &rh) { lh = __hadd(lh, rh); return lh; } +__device__ __forceinline__ __half &operator-=(__half &lh, const __half &rh) { lh = __hsub(lh, rh); return lh; } +__device__ __forceinline__ __half &operator*=(__half &lh, const __half &rh) { lh = __hmul(lh, rh); return lh; } +__device__ __forceinline__ __half &operator/=(__half &lh, const __half &rh) { lh = __hdiv(lh, rh); return lh; } + +/* Note for increment and decrement we use the raw value 0x3C00U equating to half(1.0F), to avoid the extra conversion */ +__device__ __forceinline__ __half &operator++(__half &h) { __half_raw one; one.x = 0x3C00U; h += one; return h; } +__device__ __forceinline__ __half &operator--(__half &h) { __half_raw one; one.x = 0x3C00U; h -= one; return h; } +__device__ __forceinline__ __half operator++(__half &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __half ret = h; + __half_raw one; + one.x = 0x3C00U; + h += one; + return ret; +} +__device__ __forceinline__ __half operator--(__half &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. 
+ static_cast(ignored); + + const __half ret = h; + __half_raw one; + one.x = 0x3C00U; + h -= one; + return ret; +} + +/* Unary plus and inverse operators */ +__device__ __forceinline__ __half operator+(const __half &h) { return h; } +__device__ __forceinline__ __half operator-(const __half &h) { return __hneg(h); } + +/* Some basic comparison operations to make it look like a builtin */ +__device__ __forceinline__ bool operator==(const __half &lh, const __half &rh) { return __heq(lh, rh); } +__device__ __forceinline__ bool operator!=(const __half &lh, const __half &rh) { return __hneu(lh, rh); } +__device__ __forceinline__ bool operator> (const __half &lh, const __half &rh) { return __hgt(lh, rh); } +__device__ __forceinline__ bool operator< (const __half &lh, const __half &rh) { return __hlt(lh, rh); } +__device__ __forceinline__ bool operator>=(const __half &lh, const __half &rh) { return __hge(lh, rh); } +__device__ __forceinline__ bool operator<=(const __half &lh, const __half &rh) { return __hle(lh, rh); } +#endif /* !defined(__CUDA_NO_HALF_OPERATORS__) */ +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) */ +#endif /* defined(__CUDACC__) */ + +/* __half2 is visible to non-nvcc host compilers */ +struct __CUDA_ALIGN__(4) __half2 { + __half x; + __half y; + + // All construct/copy/assign/move +public: +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) + __half2() = default; + __CUDA_HOSTDEVICE__ __half2(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); return *this; } +#else + __CUDA_HOSTDEVICE__ __half2() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + __CUDA_HOSTDEVICE__ __half2(const __half &a, const __half &b) : x(a), y(b) { } + __CUDA_HOSTDEVICE__ __half2(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); return *this; } + + /* Convert to/from __half2_raw */ + __CUDA_HOSTDEVICE__ __half2(const __half2_raw &h2r ) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2_raw &h2r) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); return *this; } + __CUDA_HOSTDEVICE__ operator __half2_raw() const { __half2_raw ret; ret.x = 0U; ret.y = 0U; __HALF2_TO_UI(ret) = __HALF2_TO_CUI(*this); return ret; } +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +/* Arithmetic FP16x2 operations only supported on arch >= 5.3 */ +#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA)) && !defined(__CUDA_NO_HALF2_OPERATORS__) + +__device__ __forceinline__ __half2 operator+(const __half2 &lh, const __half2 &rh) { return __hadd2(lh, rh); } +__device__ __forceinline__ __half2 operator-(const __half2 &lh, const __half2 &rh) { return __hsub2(lh, rh); } +__device__ __forceinline__ __half2 operator*(const __half2 &lh, const __half2 &rh) { return __hmul2(lh, rh); } +__device__ __forceinline__ __half2 operator/(const __half2 &lh, const __half2 &rh) { return __h2div(lh, rh); } + +__device__ __forceinline__ __half2& operator+=(__half2 &lh, const __half2 &rh) { lh = __hadd2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator-=(__half2 &lh, const __half2 &rh) { lh = __hsub2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator*=(__half2 &lh, const __half2 
&rh) { lh = __hmul2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator/=(__half2 &lh, const __half2 &rh) { lh = __h2div(lh, rh); return lh; } + +__device__ __forceinline__ __half2 &operator++(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return h; } +__device__ __forceinline__ __half2 &operator--(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return h; } +__device__ __forceinline__ __half2 operator++(__half2 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __half2 ret = h; + __half2_raw one; + one.x = 0x3C00U; + one.y = 0x3C00U; + h = __hadd2(h, one); + return ret; +} +__device__ __forceinline__ __half2 operator--(__half2 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __half2 ret = h; + __half2_raw one; + one.x = 0x3C00U; + one.y = 0x3C00U; + h = __hsub2(h, one); + return ret; +} + +__device__ __forceinline__ __half2 operator+(const __half2 &h) { return h; } +__device__ __forceinline__ __half2 operator-(const __half2 &h) { return __hneg2(h); } + +__device__ __forceinline__ bool operator==(const __half2 &lh, const __half2 &rh) { return __hbeq2(lh, rh); } +__device__ __forceinline__ bool operator!=(const __half2 &lh, const __half2 &rh) { return __hbneu2(lh, rh); } +__device__ __forceinline__ bool operator>(const __half2 &lh, const __half2 &rh) { return __hbgt2(lh, rh); } +__device__ __forceinline__ bool operator<(const __half2 &lh, const __half2 &rh) { return __hblt2(lh, rh); } +__device__ __forceinline__ bool operator>=(const __half2 &lh, const __half2 &rh) { return __hbge2(lh, rh); } +__device__ __forceinline__ bool operator<=(const __half2 &lh, const __half2 &rh) { return __hble2(lh, rh); } + +#endif /* (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA)) && !defined(__CUDA_NO_HALF2_OPERATORS__) */ +#endif /* defined(__CUDACC__) */ + +/* Restore warning for multiple assignment operators */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( pop ) +#endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */ + +/* Restore -Weffc++ warnings from here on */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +#undef __CUDA_HOSTDEVICE__ +#undef __CUDA_ALIGN__ + +#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */ +static inline unsigned short __internal_float2half(const float f, unsigned int &sign, unsigned int &remainder) +{ + unsigned int x; + unsigned int u; + unsigned int result; +#if defined(__CUDACC__) + (void)memcpy(&x, &f, sizeof(f)); +#else + (void)std::memcpy(&x, &f, sizeof(f)); +#endif + u = (x & 0x7fffffffU); + sign = ((x >> 16U) & 0x8000U); + // NaN/+Inf/-Inf + if (u >= 0x7f800000U) { + remainder = 0U; + result = ((u == 0x7f800000U) ? 
(sign | 0x7c00U) : 0x7fffU); + } else if (u > 0x477fefffU) { // Overflows + remainder = 0x80000000U; + result = (sign | 0x7bffU); + } else if (u >= 0x38800000U) { // Normal numbers + remainder = u << 19U; + u -= 0x38000000U; + result = (sign | (u >> 13U)); + } else if (u < 0x33000001U) { // +0/-0 + remainder = u; + result = sign; + } else { // Denormal numbers + const unsigned int exponent = u >> 23U; + const unsigned int shift = 0x7eU - exponent; + unsigned int mantissa = (u & 0x7fffffU); + mantissa |= 0x800000U; + remainder = mantissa << (32U - shift); + result = (sign | (mantissa >> shift)); + result &= 0x0000FFFFU; + } + return static_cast(result); +} +#endif /* #if !defined(__CUDACC_RTC__) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a) +{ +IF_DEVICE_OR_CUDACC( + __half val; + asm("{ cvt.rn.f16.f64 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "d"(a)); + return val; +, + __half result; + // Perform rounding to 11 bits of precision, convert value + // to float and call existing float to half conversion. + // By pre-rounding to 11 bits we avoid additional rounding + // in float to half conversion. + unsigned long long int absa; + unsigned long long int ua; + (void)memcpy(&ua, &a, sizeof(a)); + absa = (ua & 0x7fffffffffffffffULL); + if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL)) + { + // |a| >= 2^16 or NaN or |a| <= 2^(-25) + // double-rounding is not a problem + result = __float2half(static_cast(a)); + } + else + { + // here 2^(-25) < |a| < 2^16 + // prepare shifter value such that a + shifter + // done in double precision performs round-to-nearest-even + // and (a + shifter) - shifter results in a rounded to + // 11 bits of precision. Shifter needs to have exponent of + // a plus 53 - 11 = 42 and a leading bit in mantissa to guard + // against negative values. + // So need to have |a| capped to avoid overflow in exponent. + // For inputs that are smaller than half precision minnorm + // we prepare fixed shifter exponent. + unsigned long long shifterBits; + if (absa >= 0x3f10000000000000ULL) + { // Here if |a| >= 2^(-14) + // add 42 to exponent bits + shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL; + } + else + { // 2^(-25) < |a| < 2^(-14), potentially results in denormal + // set exponent bits to 42 - 14 + bias + shifterBits = 0x41B0000000000000ULL; + } + // set leading mantissa bit to protect against negative inputs + shifterBits |= 0x0008000000000000ULL; + double shifter; + (void)memcpy(&shifter, &shifterBits, sizeof(shifterBits)); + double aShiftRound = a + shifter; + + // Prevent the compiler from optimizing away a + shifter - shifter + // by doing intermediate memcopy and harmless bitwize operation + unsigned long long int aShiftRoundBits; + (void)memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound)); + + // the value is positive, so this operation doesn't change anything + aShiftRoundBits &= 0x7fffffffffffffffULL; + + (void)memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound)); + + result = __float2half(static_cast(aShiftRound - shifter)); + } + + return result; +, + __half result; + /* + // Perform rounding to 11 bits of precision, convert value + // to float and call existing float to half conversion. + // By pre-rounding to 11 bits we avoid additional rounding + // in float to half conversion. 
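+    // (Concretely: 11 bits is the 1 implicit + 10 stored significand bits of
+    //  binary16. For |a| on the order of 1.0 the shifter prepared below is
+    //  roughly 2^42, so a + shifter keeps only the top 53 - 42 = 11
+    //  significant bits of a, rounded to nearest-even, and the final
+    //  subtraction recovers that pre-rounded value.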
+ */ + unsigned long long int absa; + unsigned long long int ua; + (void)std::memcpy(&ua, &a, sizeof(a)); + absa = (ua & 0x7fffffffffffffffULL); + if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL)) + { + /* + // |a| >= 2^16 or NaN or |a| <= 2^(-25) + // double-rounding is not a problem + */ + result = __float2half(static_cast(a)); + } + else + { + /* + // here 2^(-25) < |a| < 2^16 + // prepare shifter value such that a + shifter + // done in double precision performs round-to-nearest-even + // and (a + shifter) - shifter results in a rounded to + // 11 bits of precision. Shifter needs to have exponent of + // a plus 53 - 11 = 42 and a leading bit in mantissa to guard + // against negative values. + // So need to have |a| capped to avoid overflow in exponent. + // For inputs that are smaller than half precision minnorm + // we prepare fixed shifter exponent. + */ + unsigned long long shifterBits; + if (absa >= 0x3f10000000000000ULL) + { + /* + // Here if |a| >= 2^(-14) + // add 42 to exponent bits + */ + shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL; + } + else + { + /* + // 2^(-25) < |a| < 2^(-14), potentially results in denormal + // set exponent bits to 42 - 14 + bias + */ + shifterBits = 0x41B0000000000000ULL; + } + // set leading mantissa bit to protect against negative inputs + shifterBits |= 0x0008000000000000ULL; + double shifter; + (void)std::memcpy(&shifter, &shifterBits, sizeof(shifterBits)); + double aShiftRound = a + shifter; + + /* + // Prevent the compiler from optimizing away a + shifter - shifter + // by doing intermediate memcopy and harmless bitwize operation + */ + unsigned long long int aShiftRoundBits; + (void)std::memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound)); + + // the value is positive, so this operation doesn't change anything + aShiftRoundBits &= 0x7fffffffffffffffULL; + + (void)std::memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound)); + + result = __float2half(static_cast(aShiftRound - shifter)); + } + + return result; +) +} + +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rz.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rm.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + 
unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder != 0U) && (sign != 0U)) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rp.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder != 0U) && (sign == 0U)) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a) +{ + __half2 val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low;\n" + " cvt.rn.f16.f32 low, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a)); +, + val = __half2(__float2half_rn(a), __float2half_rn(a)); +) + return val; +} + +#if defined(__CUDACC__) || defined(_NVHPC_CUDA) +__CUDA_FP16_DECL__ __half2 __internal_device_float2_to_half2_rn(const float a, const float b) { + __half2 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.f16x2.f32 %0, %2, %1; }\n" + : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b)); +, + asm("{.reg .f16 low,high;\n" + " cvt.rn.f16.f32 low, %1;\n" + " cvt.rn.f16.f32 high, %2;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b)); +) + return val; +} + +#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b) +{ + __half2 val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + val = __internal_device_float2_to_half2_rn(a,b); +, + val = __half2(__float2half_rn(a), __float2half_rn(b)); +) + return val; +} + +#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */ +static inline float __internal_half2float(const unsigned short h) +{ + unsigned int sign = ((static_cast(h) >> 15U) & 1U); + unsigned int exponent = ((static_cast(h) >> 10U) & 0x1fU); + unsigned int mantissa = ((static_cast(h) & 0x3ffU) << 13U); + float f; + if (exponent == 0x1fU) { /* NaN or Inf */ + /* discard sign of a NaN */ + sign = ((mantissa != 0U) ? (sign >> 1U) : sign); + mantissa = ((mantissa != 0U) ? 
0x7fffffU : 0U); + exponent = 0xffU; + } else if (exponent == 0U) { /* Denorm or Zero */ + if (mantissa != 0U) { + unsigned int msb; + exponent = 0x71U; + do { + msb = (mantissa & 0x400000U); + mantissa <<= 1U; /* normalize */ + --exponent; + } while (msb == 0U); + mantissa &= 0x7fffffU; /* 1.mantissa is implicit */ + } + } else { + exponent += 0x70U; + } + const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa); +#if defined(__CUDACC__) + (void)memcpy(&f, &u, sizeof(u)); +#else + (void)std::memcpy(&f, &u, sizeof(u)); +#endif + return f; +} +#endif /* !defined(__CUDACC_RTC__) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a) +{ + float val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(a))); +, + val = __internal_half2float(static_cast<__half_raw>(a).x); +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a) +{ + float val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, low;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a))); +, + val = __internal_half2float(static_cast<__half2_raw>(a).x); +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a) +{ + float val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, high;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a))); +, + val = __internal_half2float(static_cast<__half2_raw>(a).y); +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h) +{ + short int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const short int max_val = (short int)0x7fffU; + const short int min_val = (short int)0x8000U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h) +{ + unsigned short int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const unsigned short int max_val = 0xffffU; + const unsigned short int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h) +{ + int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const int max_val = (int)0x7fffffffU; + const int min_val = (int)0x80000000U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = 
max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h) +{ + unsigned int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const unsigned int max_val = 0xffffffffU; + const unsigned int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h) +{ + long long int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const long long int max_val = (long long int)0x7fffffffffffffffULL; + const long long int min_val = (long long int)0x8000000000000000ULL; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = min_val; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h) +{ + unsigned long long int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const unsigned long long int max_val = 0xffffffffffffffffULL; + const unsigned long long int min_val = 0ULL; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0x8000000000000000ULL; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} + +/* Intrinsic functions only available to nvcc compilers */ +#if defined(__CUDACC__) + +/* CUDA vector-types compatible vector creation function (note returns __half2, not half2) */ +__VECTOR_FUNCTIONS_DECL__ __half2 make_half2(const __half x, const __half y) +{ + __half2 t; t.x = x; t.y = y; return t; +} +#undef __VECTOR_FUNCTIONS_DECL__ + + +/* Definitions of intrinsics */ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a) +{ + const __half2 val = __floats2half2_rn(a.x, a.y); + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a) +{ + float hi_float; + float lo_float; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, low;}\n" : "=f"(lo_float) : "r"(__HALF2_TO_CUI(a))); + + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, high;}\n" : "=f"(hi_float) : "r"(__HALF2_TO_CUI(a))); +, + lo_float = __internal_half2float(((__half2_raw)a).x); + hi_float = 
__internal_half2float(((__half2_raw)a).y); +) + return make_float2(lo_float, hi_float); +} +__CUDA_FP16_DECL__ int __half2int_rn(const __half h) +{ + int i; + asm("cvt.rni.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ int __half2int_rd(const __half h) +{ + int i; + asm("cvt.rmi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ int __half2int_ru(const __half h) +{ + int i; + asm("cvt.rpi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __int2half_rz(const int i) +{ + __half h; + asm("cvt.rz.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __int2half_rd(const int i) +{ + __half h; + asm("cvt.rm.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __int2half_ru(const int i) +{ + __half h; + asm("cvt.rp.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} + +__CUDA_FP16_DECL__ short int __half2short_rn(const __half h) +{ + short int i; + asm("cvt.rni.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ short int __half2short_rd(const __half h) +{ + short int i; + asm("cvt.rmi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ short int __half2short_ru(const __half h) +{ + short int i; + asm("cvt.rpi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); +, + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __short2half_rz(const short int i) +{ + __half h; + asm("cvt.rz.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __short2half_rd(const short int i) +{ + __half h; + asm("cvt.rm.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __short2half_ru(const short int i) +{ + __half h; + asm("cvt.rp.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h) +{ + unsigned int i; + asm("cvt.rni.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h) +{ + unsigned int i; + asm("cvt.rmi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h) +{ + unsigned int i; + asm("cvt.rpi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. 
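+    // (Concretely: an unsigned int with at most 24 significant bits converts
+    //  to float exactly, so only the float-to-half rounding remains; anything
+    //  wider is at least 2^24 = 16777216 > 65504, the largest finite half,
+    //  and rounds to +infinity either way.)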
+ const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i) +{ + __half h; + asm("cvt.rz.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i) +{ + __half h; + asm("cvt.rm.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i) +{ + __half h; + asm("cvt.rp.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h) +{ + unsigned short int i; + asm("cvt.rni.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h) +{ + unsigned short int i; + asm("cvt.rmi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h) +{ + unsigned short int i; + asm("cvt.rpi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); +, + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i) +{ + __half h; + asm("cvt.rz.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i) +{ + __half h; + asm("cvt.rm.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i) +{ + __half h; + asm("cvt.rp.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h) +{ + unsigned long long int i; + asm("cvt.rni.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h) +{ + unsigned long long int i; + asm("cvt.rmi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h) +{ + unsigned long long int i; + asm("cvt.rpi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. 
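+    // (Same reasoning as the 32-bit paths above. An illustrative round trip,
+    //  assuming only intrinsics declared in this header:
+    //      __half h = __ull2half_rn(1000ULL);        // 1000 needs 10 significant bits, exact in binary16
+    //      unsigned long long u = __half2ull_rz(h);  // u == 1000
+    //  )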
+ const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i) +{ + __half h; + asm("cvt.rz.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i) +{ + __half h; + asm("cvt.rm.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i) +{ + __half h; + asm("cvt.rp.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} + +__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h) +{ + long long int i; + asm("cvt.rni.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h) +{ + long long int i; + asm("cvt.rmi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h) +{ + long long int i; + asm("cvt.rpi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i) +{ + __half h; + asm("cvt.rz.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i) +{ + __half h; + asm("cvt.rm.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i) +{ + __half h; + asm("cvt.rp.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} + +__CUDA_FP16_DECL__ __half htrunc(const __half h) +{ + __half r; + asm("cvt.rzi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hceil(const __half h) +{ + __half r; + asm("cvt.rpi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hfloor(const __half h) +{ + __half r; + asm("cvt.rmi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hrint(const __half h) +{ + __half r; + asm("cvt.rni.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} + +__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rzi.f16.f16 low, low;\n" + " cvt.rzi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rpi.f16.f16 low, low;\n" + " cvt.rpi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rmi.f16.f16 low, low;\n" + " cvt.rmi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : 
"r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rni.f16.f16 low, low;\n" + " cvt.rni.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b) +{ + __half2 val; + asm("{.reg .f16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {alow,blow};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b))); + return val; +} +__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b) +{ + __half2 val; + asm("{.reg .f16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b))); + return val; +} +__CUDA_FP16_DECL__ __half __low2half(const __half2 a) +{ + __half ret; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, low;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a))); + return ret; +} +__CUDA_FP16_DECL__ int __hisinf(const __half a) +{ + int retval; + if (__HALF_TO_CUS(a) == 0xFC00U) { + retval = -1; + } else if (__HALF_TO_CUS(a) == 0x7C00U) { + retval = 1; + } else { + retval = 0; + } + return retval; +} +__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half __high2half(const __half2 a) +{ + __half ret; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, high;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a))); + return ret; +} +__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b) +{ + __half2 val; + asm("{ mov.b32 %0, {%1,%2};}\n" + : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); + return val; +} +__CUDA_FP16_DECL__ __half2 __half2half2(const __half a) +{ + __half2 val; + asm("{ mov.b32 %0, {%1,%1};}\n" + : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ short int __half_as_short(const __half h) +{ + return static_cast(__HALF_TO_CUS(h)); +} +__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h) +{ + return __HALF_TO_CUS(h); +} +__CUDA_FP16_DECL__ __half __short_as_half(const short int i) +{ + __half h; + __HALF_TO_US(h) = static_cast(i); + return h; +} +__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i) +{ + __half h; + __HALF_TO_US(h) = i; + return h; +} + +/****************************************************************************** +* __half arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b) +{ +#if 
!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF_MACRO(max) +#else + const float fa = __half2float(a); + const float fb = __half2float(b); + float fr; + asm("{max.f32 %0,%1,%2;\n}" + :"=f"(fr) : "f"(fa), "f"(fb)); + const __half hr = __float2half(fr); + return hr; +#endif +} +__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b) +{ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF_MACRO(min) +#else + const float fa = __half2float(a); + const float fb = __half2float(b); + float fr; + asm("{min.f32 %0,%1,%2;\n}" + :"=f"(fr) : "f"(fa), "f"(fb)); + const __half hr = __float2half(fr); + return hr; +#endif +} + +/****************************************************************************** +* __half2 arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b) +{ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF2_MACRO(max) +#else + const float2 fa = __half22float2(a); + const float2 fb = __half22float2(b); + float2 fr; + asm("{max.f32 %0,%1,%2;\n}" + :"=f"(fr.x) : "f"(fa.x), "f"(fb.x)); + asm("{max.f32 %0,%1,%2;\n}" + :"=f"(fr.y) : "f"(fa.y), "f"(fb.y)); + const __half2 hr = __float22half2_rn(fr); + return hr; +#endif +} +__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b) +{ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF2_MACRO(min) +#else + const float2 fa = __half22float2(a); + const float2 fb = __half22float2(b); + float2 fr; + asm("{min.f32 %0,%1,%2;\n}" + :"=f"(fr.x) : "f"(fa.x), "f"(fb.x)); + asm("{min.f32 %0,%1,%2;\n}" + :"=f"(fr.y) : "f"(fa.y), "f"(fb.y)); + const __half2 hr = __float22half2_rn(fr); + return hr; +#endif +} + + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) || defined(_NVHPC_CUDA) +/****************************************************************************** +* __half, __half2 warp shuffle * +******************************************************************************/ +#define __SHUFFLE_HALF2_MACRO(name) /* do */ {\ + __half2 r; \ + asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3;\n}" \ + :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c)); \ + return r; \ +} /* while(0) */ + +#define __SHUFFLE_SYNC_HALF2_MACRO(name) /* do */ {\ + __half2 r; \ + asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3,%4;\n}" \ + :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \ + return r; \ +} /* while(0) */ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) + +__CUDA_FP16_DECL__ __half2 __shfl(const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.idx.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_HALF2_MACRO(shfl.up.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_down(const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.down.b32) +} +__CUDA_FP16_DECL__ __half2 
__shfl_xor(const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.bfly.b32) +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) */ + +__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.idx.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.up.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.down.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.bfly.b32) +} + +#undef __SHUFFLE_HALF2_MACRO +#undef __SHUFFLE_SYNC_HALF2_MACRO + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) + +__CUDA_FP16_DECL__ __half __shfl(const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_up(const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_up(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_down(const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_down(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_xor(const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_xor(temp1, delta, width); + return __low2half(temp2); +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) */ + +__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_up_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + 
const __half2 temp2 = __shfl_down_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_xor_sync(mask, temp1, delta, width); + return __low2half(temp2); +} + +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) || defined(_NVHPC_CUDA) */ +/****************************************************************************** +* __half and __half2 __ldg,__ldcg,__ldca,__ldcs * +******************************************************************************/ + +#if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) || defined(_NVHPC_CUDA)) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ +__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr) +{ + __half ret; + asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr) +{ + __half ret; + asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr) +{ + __half ret; + asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value) +{ + asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void 
__stcg(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value) +{ + asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value) +{ + asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value) +{ + asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +#undef __LDG_PTR +#endif /* defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) || defined(_NVHPC_CUDA)) */ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) +/****************************************************************************** +* __half2 comparison * +******************************************************************************/ +#define __COMPARISON_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + return val; \ +} /* while(0) */ +__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.eq) +} +__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ne) +} +__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.le) +} +__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ge) +} +__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.lt) +} +__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.gt) +} +__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.equ) +} +__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.neu) +} +__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.leu) +} +__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.geu) +} +__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ltu) +} +__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.gtu) +} +#undef __COMPARISON_OP_HALF2_MACRO +/****************************************************************************** +* __half2 comparison with mask output * +******************************************************************************/ +#define __COMPARISON_OP_HALF2_MACRO_MASK(name) /* do */ {\ + unsigned val; \ + asm( "{ " __CUDA_FP16_STRINGIFY(name) ".u32.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + return val; \ +} /* while(0) */ +__CUDA_FP16_DECL__ 
unsigned __heq2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.eq) +} +__CUDA_FP16_DECL__ unsigned __hne2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.ne) +} +__CUDA_FP16_DECL__ unsigned __hle2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.le) +} +__CUDA_FP16_DECL__ unsigned __hge2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.ge) +} +__CUDA_FP16_DECL__ unsigned __hlt2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.lt) +} +__CUDA_FP16_DECL__ unsigned __hgt2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.gt) +} +__CUDA_FP16_DECL__ unsigned __hequ2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.equ) +} +__CUDA_FP16_DECL__ unsigned __hneu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.neu) +} +__CUDA_FP16_DECL__ unsigned __hleu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.leu) +} +__CUDA_FP16_DECL__ unsigned __hgeu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.geu) +} +__CUDA_FP16_DECL__ unsigned __hltu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.ltu) +} +__CUDA_FP16_DECL__ unsigned __hgtu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.gtu) +} +#undef __COMPARISON_OP_HALF2_MACRO_MASK +#define __BOOL_COMPARISON_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + bool retval; \ + asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + if (__HALF2_TO_CUI(val) == 0x3C003C00U) {\ + retval = true; \ + } else { \ + retval = false; \ + }\ + return retval;\ +} /* while(0) */ +__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.eq) +} +__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ne) +} +__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.le) +} +__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ge) +} +__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.lt) +} +__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.gt) +} +__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.equ) +} +__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.neu) +} +__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.leu) +} +__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.geu) +} +__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ltu) +} +__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.gtu) +} +#undef __BOOL_COMPARISON_OP_HALF2_MACRO +/****************************************************************************** +* __half comparison * +******************************************************************************/ +#define 
__COMPARISON_OP_HALF_MACRO(name) /* do */ {\ + unsigned short val; \ + asm( "{ .reg .pred __$temp3;\n" \ + " setp." __CUDA_FP16_STRINGIFY(name) ".f16 __$temp3, %1, %2;\n" \ + " selp.u16 %0, 1, 0, __$temp3;}" \ + : "=h"(val) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); \ + return (val != 0U) ? true : false; \ +} /* while(0) */ +__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(eq) +} +__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ne) +} +__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(le) +} +__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ge) +} +__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(lt) +} +__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(gt) +} +__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(equ) +} +__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(neu) +} +__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(leu) +} +__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(geu) +} +__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ltu) +} +__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(gtu) +} +#undef __COMPARISON_OP_HALF_MACRO +/****************************************************************************** +* __half2 arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add) +} +__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub) +} +__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul) +} +__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add.sat) +} +__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub.sat) +} +__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul.sat) +} +__CUDA_FP16_DECL__ __half2 __hadd2_rn(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add.rn) +} +__CUDA_FP16_DECL__ __half2 __hsub2_rn(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub.rn) +} +__CUDA_FP16_DECL__ __half2 __hmul2_rn(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul.rn) +} +__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn) +} +__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn.sat) +} +__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b) { + __half ha = __low2half(a); + __half hb = __low2half(b); + + const __half v1 = __hdiv(ha, hb); + + ha = __high2half(a); + hb = __high2half(b); + + const __half v2 = __hdiv(ha, hb); + + return __halves2half2(v1, v2); +} +/****************************************************************************** +* __half arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ 
__half __hadd(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add) +} +__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub) +} +__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul) +} +__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add.sat) +} +__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub.sat) +} +__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul.sat) +} +__CUDA_FP16_DECL__ __half __hadd_rn(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add.rn) +} +__CUDA_FP16_DECL__ __half __hsub_rn(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub.rn) +} +__CUDA_FP16_DECL__ __half __hmul_rn(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul.rn) +} +__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn) +} +__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn.sat) +} +__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b) { + __half v; + __half abs; + __half den; + __HALF_TO_US(den) = 0x008FU; + + float rcp; + const float fa = __half2float(a); + const float fb = __half2float(b); + + asm("{rcp.approx.ftz.f32 %0, %1;\n}" :"=f"(rcp) : "f"(fb)); + + float fv = rcp * fa; + + v = __float2half(fv); + abs = __habs(v); + if (__hlt(abs, den) && __hlt(__float2half(0.0f), abs)) { + const float err = __fmaf_rn(-fb, fv, fa); + fv = __fmaf_rn(rcp, err, fv); + v = __float2half(fv); + } + return v; +} + +/****************************************************************************** +* __half2 functions * +******************************************************************************/ +#define __SPEC_CASE2(i,r, spc, ulp) \ + "{.reg.b32 spc, ulp, p;\n"\ + " mov.b32 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\ + " mov.b32 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16x2.f16x2 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.f16x2 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n" +#define __SPEC_CASE(i,r, spc, ulp) \ + "{.reg.b16 spc, ulp, p;\n"\ + " mov.b16 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\ + " mov.b16 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16.f16 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.f16 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n" +#define __APPROX_FCAST(fun) /* do */ {\ + __half val;\ + asm("{.reg.b32 f; \n"\ + " .reg.b16 r; \n"\ + " mov.b16 r,%1; \n"\ + " cvt.f32.f16 f,r; \n"\ + " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 f,f; \n"\ + " cvt.rn.f16.f32 r,f; \n"\ + " mov.b16 %0,r; \n"\ + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));\ + return val;\ +} /* while(0) */ +#define __APPROX_FCAST2(fun) /* do */ {\ + __half2 val;\ + asm("{.reg.b16 hl, hu; \n"\ + " .reg.b32 fl, fu; \n"\ + " mov.b32 {hl, hu}, %1; \n"\ + " cvt.f32.f16 fl, hl; \n"\ + " cvt.f32.f16 fu, hu; \n"\ + " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fl, fl; \n"\ + " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fu, fu; \n"\ + " cvt.rn.f16.f32 hl, fl; \n"\ + " cvt.rn.f16.f32 hu, fu; \n"\ + " mov.b32 %0, {hl, hu}; \n"\ + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); \ + return val;\ +} /* while(0) */ +static __device__ __forceinline__ float __float_simpl_sinf(float a); +static __device__ __forceinline__ float 
__float_simpl_cosf(float a); +__CUDA_FP16_DECL__ __half hsin(const __half a) { + const float sl = __float_simpl_sinf(__half2float(a)); + __half r = __float2half_rn(sl); + asm("{\n\t" + " .reg.b16 i,r,t; \n\t" + " mov.b16 r, %0; \n\t" + " mov.b16 i, %1; \n\t" + " and.b16 t, r, 0x8000U; \n\t" + " abs.f16 r, r; \n\t" + " abs.f16 i, i; \n\t" + __SPEC_CASE(i, r, 0X32B3U, 0x0800U) + __SPEC_CASE(i, r, 0X5CB0U, 0x9000U) + " or.b16 r,r,t; \n\t" + " mov.b16 %0, r; \n" + "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a) { + const float sl = __float_simpl_sinf(__half2float(a.x)); + const float sh = __float_simpl_sinf(__half2float(a.y)); + __half2 r = __floats2half2_rn(sl, sh); + asm("{\n\t" + " .reg.b32 i,r,t; \n\t" + " mov.b32 r, %0; \n\t" + " mov.b32 i, %1; \n\t" + " and.b32 t, r, 0x80008000U; \n\t" + " abs.f16x2 r, r; \n\t" + " abs.f16x2 i, i; \n\t" + __SPEC_CASE2(i, r, 0X32B332B3U, 0x08000800U) + __SPEC_CASE2(i, r, 0X5CB05CB0U, 0x90009000U) + " or.b32 r, r, t; \n\t" + " mov.b32 %0, r; \n" + "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half hcos(const __half a) { + const float cl = __float_simpl_cosf(__half2float(a)); + __half r = __float2half_rn(cl); + asm("{\n\t" + " .reg.b16 i,r; \n\t" + " mov.b16 r, %0; \n\t" + " mov.b16 i, %1; \n\t" + " abs.f16 i, i; \n\t" + __SPEC_CASE(i, r, 0X2B7CU, 0x1000U) + " mov.b16 %0, r; \n" + "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a) { + const float cl = __float_simpl_cosf(__half2float(a.x)); + const float ch = __float_simpl_cosf(__half2float(a.y)); + __half2 r = __floats2half2_rn(cl, ch); + asm("{\n\t" + " .reg.b32 i,r; \n\t" + " mov.b32 r, %0; \n\t" + " mov.b32 i, %1; \n\t" + " abs.f16x2 i, i; \n\t" + __SPEC_CASE2(i, r, 0X2B7C2B7CU, 0x10001000U) + " mov.b32 %0, r; \n" + "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +static __device__ __forceinline__ float __internal_trig_reduction_kernel(const float a, unsigned int *const quadrant) +{ + const float ar = __fmaf_rn(a, 0.636619772F, 12582912.0F); + const unsigned q = __float_as_uint(ar); + const float j = __fsub_rn(ar, 12582912.0F); + float t = __fmaf_rn(j, -1.5707962512969971e+000F, a); + t = __fmaf_rn(j, -7.5497894158615964e-008F, t); + *quadrant = q; + return t; +} +static __device__ __forceinline__ float __internal_sin_cos_kernel(const float x, const unsigned int i) +{ + float z; + const float x2 = x*x; + float a8; + float a6; + float a4; + float a2; + float a1; + float a0; + + if ((i & 1U) != 0U) { + // cos + a8 = 2.44331571e-5F; + a6 = -1.38873163e-3F; + a4 = 4.16666457e-2F; + a2 = -5.00000000e-1F; + a1 = x2; + a0 = 1.0F; + } + else { + // sin + a8 = -1.95152959e-4F; + a6 = 8.33216087e-3F; + a4 = -1.66666546e-1F; + a2 = 0.0F; + a1 = x; + a0 = x; + } + + z = __fmaf_rn(a8, x2, a6); + z = __fmaf_rn(z, x2, a4); + z = __fmaf_rn(z, x2, a2); + z = __fmaf_rn(z, a1, a0); + + if ((i & 2U) != 0U) { + z = -z; + } + return z; +} +static __device__ __forceinline__ float __float_simpl_sinf(float a) +{ + float z; + unsigned i; + a = __internal_trig_reduction_kernel(a, &i); + z = __internal_sin_cos_kernel(a, i); + return z; +} +static __device__ __forceinline__ float __float_simpl_cosf(float a) +{ + float z; + unsigned i; + a = __internal_trig_reduction_kernel(a, &i); + z = __internal_sin_cos_kernel(a, (i & 0x3U) + 1U); + return z; +} + +__CUDA_FP16_DECL__ __half hexp(const __half a) { + __half val; + 
asm("{.reg.b32 f, C, nZ; \n" + " .reg.b16 h,r; \n" + " mov.b16 h,%1; \n" + " cvt.f32.f16 f,h; \n" + " mov.b32 C, 0x3fb8aa3bU; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 f,f,C,nZ; \n" + " ex2.approx.ftz.f32 f,f; \n" + " cvt.rn.f16.f32 r,f; \n" + __SPEC_CASE(h, r, 0X1F79U, 0x9400U) + __SPEC_CASE(h, r, 0X25CFU, 0x9400U) + __SPEC_CASE(h, r, 0XC13BU, 0x0400U) + __SPEC_CASE(h, r, 0XC1EFU, 0x0200U) + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu,C,nZ; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " mov.b32 C, 0x3fb8aa3bU; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 fl,fl,C,nZ; \n" + " fma.rn.f32 fu,fu,C,nZ; \n" + " ex2.approx.ftz.f32 fl, fl; \n" + " ex2.approx.ftz.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0X1F791F79U, 0x94009400U) + __SPEC_CASE2(h, r, 0X25CF25CFU, 0x94009400U) + __SPEC_CASE2(h, r, 0XC13BC13BU, 0x04000400U) + __SPEC_CASE2(h, r, 0XC1EFC1EFU, 0x02000200U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hexp2(const __half a) { + __half val; + asm("{.reg.b32 f, ULP; \n" + " .reg.b16 r; \n" + " mov.b16 r,%1; \n" + " cvt.f32.f16 f,r; \n" + " ex2.approx.ftz.f32 f,f; \n" + " mov.b32 ULP, 0x33800000U;\n" + " fma.rn.f32 f,f,ULP,f; \n" + " cvt.rn.f16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 fl, fu, ULP; \n" + " mov.b32 {hl, hu}, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " ex2.approx.ftz.f32 fl, fl; \n" + " ex2.approx.ftz.f32 fu, fu; \n" + " mov.b32 ULP, 0x33800000U;\n" + " fma.rn.f32 fl,fl,ULP,fl; \n" + " fma.rn.f32 fu,fu,ULP,fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 %0, {hl, hu}; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hexp10(const __half a) { + __half val; + asm("{.reg.b16 h,r; \n" + " .reg.b32 f, C, nZ; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " mov.b32 C, 0x40549A78U; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 f,f,C,nZ; \n" + " ex2.approx.ftz.f32 f, f; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(h, r, 0x34DEU, 0x9800U) + __SPEC_CASE(h, r, 0x9766U, 0x9000U) + __SPEC_CASE(h, r, 0x9972U, 0x1000U) + __SPEC_CASE(h, r, 0xA5C4U, 0x1000U) + __SPEC_CASE(h, r, 0xBF0AU, 0x8100U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu,C,nZ; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " mov.b32 C, 0x40549A78U; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 fl,fl,C,nZ; \n" + " fma.rn.f32 fu,fu,C,nZ; \n" + " ex2.approx.ftz.f32 fl, fl; \n" + " ex2.approx.ftz.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0x34DE34DEU, 0x98009800U) + __SPEC_CASE2(h, r, 0x97669766U, 0x90009000U) + __SPEC_CASE2(h, r, 0x99729972U, 0x10001000U) + __SPEC_CASE2(h, r, 0xA5C4A5C4U, 
0x10001000U) + __SPEC_CASE2(h, r, 0xBF0ABF0AU, 0x81008100U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog2(const __half a) { + __half val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " lg2.approx.ftz.f32 f, f; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(r, r, 0xA2E2U, 0x8080U) + __SPEC_CASE(r, r, 0xBF46U, 0x9400U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 fl, fu, r, p; \n" + " mov.b32 {hl, hu}, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.ftz.f32 fl, fl; \n" + " lg2.approx.ftz.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(r, r, 0xA2E2A2E2U, 0x80808080U) + __SPEC_CASE2(r, r, 0xBF46BF46U, 0x94009400U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog(const __half a) { + __half val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 r,h; \n" + " mov.b16 h,%1; \n" + " cvt.f32.f16 f,h; \n" + " lg2.approx.ftz.f32 f,f; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.f16.f32 r,f; \n" + __SPEC_CASE(h, r, 0X160DU, 0x9C00U) + __SPEC_CASE(h, r, 0X3BFEU, 0x8010U) + __SPEC_CASE(h, r, 0X3C0BU, 0x8080U) + __SPEC_CASE(h, r, 0X6051U, 0x1C00U) + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.ftz.f32 fl, fl; \n" + " lg2.approx.ftz.f32 fu, fu; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0X160D160DU, 0x9C009C00U) + __SPEC_CASE2(h, r, 0X3BFE3BFEU, 0x80108010U) + __SPEC_CASE2(h, r, 0X3C0B3C0BU, 0x80808080U) + __SPEC_CASE2(h, r, 0X60516051U, 0x1C001C00U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog10(const __half a) { + __half val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " lg2.approx.ftz.f32 f, f; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(h, r, 0x338FU, 0x1000U) + __SPEC_CASE(h, r, 0x33F8U, 0x9000U) + __SPEC_CASE(h, r, 0x57E1U, 0x9800U) + __SPEC_CASE(h, r, 0x719DU, 0x9C00U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.ftz.f32 fl, fl; \n" + " lg2.approx.ftz.f32 fu, fu; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0x338F338FU, 0x10001000U) + __SPEC_CASE2(h, r, 0x33F833F8U, 0x90009000U) + __SPEC_CASE2(h, r, 
0x57E157E1U, 0x98009800U) + __SPEC_CASE2(h, r, 0x719D719DU, 0x9C009C00U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +#undef __SPEC_CASE2 +#undef __SPEC_CASE +__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a) { + __APPROX_FCAST2(rcp) +} +__CUDA_FP16_DECL__ __half hrcp(const __half a) { + __APPROX_FCAST(rcp) +} +__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a) { + __APPROX_FCAST2(rsqrt) +} +__CUDA_FP16_DECL__ __half hrsqrt(const __half a) { + __APPROX_FCAST(rsqrt) +} +__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a) { + __APPROX_FCAST2(sqrt) +} +__CUDA_FP16_DECL__ __half hsqrt(const __half a) { + __APPROX_FCAST(sqrt) +} +#undef __APPROX_FCAST +#undef __APPROX_FCAST2 +__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a) +{ + __half2 r; + asm("{set.nan.f16x2.f16x2 %0,%1,%2;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ bool __hisnan(const __half a) +{ + __half r; + asm("{set.nan.f16.f16 %0,%1,%2;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(a))); + return __HALF_TO_CUS(r) != 0U; +} +__CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a) +{ + __half2 r; + asm("{neg.f16x2 %0,%1;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __hneg(const __half a) +{ + __half r; + asm("{neg.f16 %0,%1;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a) +{ + __half2 r; + asm("{abs.f16x2 %0,%1;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __habs(const __half a) +{ + __half r; + asm("{abs.f16 %0,%1;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} + +__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c) +{ + // fast version of complex multiply-accumulate + // (a.re, a.im) * (b.re, b.im) + (c.re, c.im) + // acc.re = (c.re + a.re*b.re) - a.im*b.im + // acc.im = (c.im + a.re*b.im) + a.im*b.re + __half real_tmp = __hfma(a.x, b.x, c.x); + __half img_tmp = __hfma(a.x, b.y, c.y); + real_tmp = __hfma(__hneg(a.y), b.y, real_tmp); + img_tmp = __hfma(a.y, b.x, img_tmp); + return make_half2(real_tmp, img_tmp); +} + +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) */ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) +__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(max.NaN) +} +__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(min.NaN) +} +__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn.relu) +} + +__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(max.NaN) +} +__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(min.NaN) +} +__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn.relu) +} +#endif /*defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)*/ + +/* Define __PTR for atomicAdd prototypes below, undef after done */ +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __PTR "l" +#else +#define __PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || 
defined(__CUDACC_RTC__)*/ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600) + +__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val) { + __half2 r; + asm volatile ("{ atom.add.noftz.f16x2 %0,[%1],%2; }\n" + : "=r"(__HALF2_TO_UI(r)) : __PTR(address), "r"(__HALF2_TO_CUI(val)) + : "memory"); + return r; +} + +#endif /*defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)*/ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) + +__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val) { + __half r; + asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n" + : "=h"(__HALF_TO_US(r)) + : __PTR(address), "h"(__HALF_TO_CUS(val)) + : "memory"); + return r; +} + +#endif /*defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)*/ + +#undef __PTR + +#undef __CUDA_FP16_DECL__ +#endif /* defined(__CUDACC__) */ +#endif /* defined(__cplusplus) */ + +#undef __TERNARY_OP_HALF2_MACRO +#undef __TERNARY_OP_HALF_MACRO +#undef __BINARY_OP_HALF2_MACRO +#undef __BINARY_OP_HALF_MACRO + +#undef __CUDA_HOSTDEVICE_FP16_DECL__ +#undef __CUDA_FP16_DECL__ + +#undef __HALF_TO_US +#undef __HALF_TO_CUS +#undef __HALF2_TO_UI +#undef __HALF2_TO_CUI + +/* Define first-class types "half" and "half2", unless user specifies otherwise via "#define CUDA_NO_HALF" */ +/* C cannot ever have these types defined here, because __half and __half2 are C++ classes */ +#if defined(__cplusplus) && !defined(CUDA_NO_HALF) +typedef __half half; +typedef __half2 half2; +// for consistency with __nv_bfloat16 +typedef __half __nv_half; +typedef __half2 __nv_half2; +typedef __half_raw __nv_half_raw; +typedef __half2_raw __nv_half2_raw; +typedef __half nv_half; +typedef __half2 nv_half2; +#endif /* defined(__cplusplus) && !defined(CUDA_NO_HALF) */ + +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) +#undef __CPP_VERSION_AT_LEAST_11_FP16 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + +#endif /* end of include guard: __CUDA_FP16_HPP__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h new file mode 100644 index 0000000000000000000000000000000000000000..d47c9a9100b13192e1e6376001a4989e9c077340 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h @@ -0,0 +1,367 @@ +/* + * Copyright 2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
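For orientation: the cuda_fp16.hpp hunk above supplies the device-side half-precision intrinsics (__hadd, __hmul, __hfma, the __half2 variants, hsin/hexp/hlog, and so on) plus atomicAdd overloads for __half2 (sm_60 and newer) and __half (sm_70 and newer). A minimal usage sketch follows, assuming a .cu file compiled for a sufficient architecture; the kernel name and launch shape are illustrative and are not taken from the diff.

#include <cuda_fp16.h>

// Sketch only: y[i] = a*x[i] + y[i] in half precision, with an fp16 atomic
// accumulation of the updated values into *sum (requires sm_70 or newer).
__global__ void haxpy_accumulate(const __half a, const __half *x, __half *y,
                                 __half *sum, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = __hfma(a, x[i], y[i]); // fused multiply-add, round-to-nearest
        atomicAdd(sum, y[i]);         // atom.add.noftz.f16 overload declared above
    }
}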
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef __CUDA_FP8_H__ +#define __CUDA_FP8_H__ + +/* Set up function decorations */ +#if defined(__CUDACC__) +#define __CUDA_FP8_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_FP8__ __host__ __device__ +#define __CUDA_HOSTDEVICE_FP8_DECL__ static __host__ __device__ __inline__ +#else /* !defined(__CUDACC__) */ +#if defined(__GNUC__) +#define __CUDA_HOSTDEVICE_FP8_DECL__ static __attribute__((unused)) +#else +#define __CUDA_HOSTDEVICE_FP8_DECL__ static +#endif /* defined(__GNUC__) */ +#define __CUDA_HOSTDEVICE_FP8__ +#endif /* defined(__CUDACC_) */ + +#if !defined(_MSC_VER) && __cplusplus >= 201103L +#define __CPP_VERSION_AT_LEAST_11_FP8 +#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L +#define __CPP_VERSION_AT_LEAST_11_FP8 +#endif + +/* bring in __half_raw data type */ +#include "cuda_fp16.h" +/* bring in __nv_bfloat16_raw data type */ +#include "cuda_bf16.h" +/* bring in float2, double4, etc vector types */ +#include "vector_types.h" + +/** + * \defgroup CUDA_MATH_INTRINSIC_FP8 FP8 Intrinsics + * This section describes fp8 intrinsic functions. + * To use these functions, include the header file \p cuda_fp8.h in your + * program. + * The following macros are available to help users selectively enable/disable + * various definitions present in the header file: + * - \p __CUDA_NO_FP8_CONVERSIONS__ - If defined, this macro will prevent any + * use of the C++ type conversions (converting constructors and conversion + * operators) defined in the header. + * - \p __CUDA_NO_FP8_CONVERSION_OPERATORS__ - If defined, this macro will + * prevent any use of the C++ conversion operators from \p fp8 to other types. + */ + +/** + * \defgroup CUDA_MATH_FP8_MISC FP8 Conversion and Data Movement + * \ingroup CUDA_MATH_INTRINSIC_FP8 + * To use these functions, include the header file \p cuda_fp8.h in your + * program. + */ + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief 8-bit \p unsigned \p integer + * type abstraction used to for \p fp8 floating-point + * numbers storage. 
+ */ +typedef unsigned char __nv_fp8_storage_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief 16-bit \p unsigned \p integer + * type abstraction used to for storage of pairs of + * \p fp8 floating-point numbers. + */ +typedef unsigned short int __nv_fp8x2_storage_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief 32-bit \p unsigned \p integer + * type abstraction used to for storage of tetrads of + * \p fp8 floating-point numbers. + */ +typedef unsigned int __nv_fp8x4_storage_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Enumerates the modes applicable when + * performing a narrowing conversion to \p fp8 destination types. + */ +typedef enum __nv_saturation_t { + /** + * Means no saturation to finite is performed when conversion + * results in rounding values outside the range of destination + * type. + * NOTE: for fp8 type of e4m3 kind, the results that are larger + * than the maximum representable finite number of the target + * format become NaN. + */ + __NV_NOSAT, + /** + * Means input larger than the maximum representable + * finite number MAXNORM of the target format round to the + * MAXNORM of the same sign as input. + */ + __NV_SATFINITE, +} __nv_saturation_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Enumerates the possible + * interpretations of the 8-bit values when referring to them as + * \p fp8 types. + */ +typedef enum __nv_fp8_interpretation_t { + __NV_E4M3, /**< Stands for \p fp8 numbers of \p e4m3 kind. */ + __NV_E5M2, /**< Stands for \p fp8 numbers of \p e5m2 kind. */ +} __nv_fp8_interpretation_t; + +/* Forward-declaration of C-style APIs */ + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p double precision \p x to \p fp8 type of the + * requested kind using round-to-nearest-even rounding and requested saturation + * mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. + * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p double precision numbers packed + * in \p double2 \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p single precision \p x to \p fp8 type of the + * requested kind using round-to-nearest-even rounding and requested saturation + * mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. 
+ * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p single precision numbers packed + * in \p float2 \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p half precision \p x to \p fp8 type of the requested + * kind using round-to-nearest-even rounding and requested saturation mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. + * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p half precision numbers packed + * in \p __half2_raw \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2( + const __half2_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p nv_bfloat16 precision \p x to \p fp8 type of the + * requested kind using round-to-nearest-even rounding and requested saturation + * mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. + * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. 
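The declarations above make up the narrowing side of the C-style API, from double, float and half down to fp8. A minimal host-side sketch, with the helper name being an assumption rather than part of the header; with __NV_SATFINITE, out-of-range inputs clamp to the format's MAXNORM, while __NV_NOSAT lets e4m3 overflow become NaN and e5m2 overflow become infinity.

#include <cuda_fp8.h>

// Sketch only: pack two floats into a single __nv_fp8x2_storage_t (e5m2).
__nv_fp8x2_storage_t pack_two_e5m2(float lo, float hi)
{
    float2 v;
    v.x = lo; // lands in the low byte of the result
    v.y = hi; // lands in the high byte
    // Round-to-nearest-even, saturating out-of-range values to +/-MAXNORM.
    return __nv_cvt_float2_to_fp8x2(v, __NV_SATFINITE, __NV_E5M2);
}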
+ */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8( + const __nv_bfloat16_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p nv_bfloat16 precision numbers packed + * in \p __nv_bfloat162_raw \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_bfloat16raw2_to_fp8x2( + const __nv_bfloat162_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p fp8 \p x of the specified kind + * to \p half precision. + * + * \details Converts input \p x of \p fp8 type of the kind specified by + * \p fp8_interpretation parameter + * to \p half precision. + * + * \returns + * - The \p __half_raw value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __half_raw +__nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation); +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p fp8 values of the specified kind + * to a vector of two \p half precision values packed in \p __half2_raw + * structure. + * + * \details Converts input vector \p x of \p fp8 type of the kind specified by + * \p fp8_interpretation parameter + * to a vector of two \p half precision values and returns as \p __half2_raw + * structure. + * + * \returns + * - The \p __half2_raw value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw +__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation); + +#if defined(__cplusplus) + +#define __CUDA_FP8_TYPES_EXIST__ + +/* Forward-declaration of structures defined in "cuda_fp8.hpp" */ +struct __nv_fp8_e5m2; +struct __nv_fp8x2_e5m2; +struct __nv_fp8x4_e5m2; + +struct __nv_fp8_e4m3; +struct __nv_fp8x2_e4m3; +struct __nv_fp8x4_e4m3; + +#endif /* defined(__cplusplus) */ + +#include "cuda_fp8.hpp" + +#undef __CUDA_FP8_DECL__ +#undef __CUDA_HOSTDEVICE_FP8__ +#undef __CUDA_HOSTDEVICE_FP8_DECL__ + +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) +#undef __CPP_VERSION_AT_LEAST_11_FP8 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#endif /* end of include guard: __CUDA_FP8_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9212081df2c7ea8938cb1142a3b2ba5750b7329b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp @@ -0,0 +1,1546 @@ +/* + * Copyright 2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
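Going the other direction, the cuda_fp8.h declarations just above widen stored fp8 bytes back to half precision; every fp8 value is exactly representable there. A short sketch under the same caveat that the helper name is illustrative only.

#include <cuda_fp16.h>
#include <cuda_fp8.h>

// Sketch only: decode one e4m3 byte to float; the widening itself is exact.
float e4m3_to_float(__nv_fp8_storage_t b)
{
    const __half_raw hr = __nv_cvt_fp8_to_halfraw(b, __NV_E4M3);
    return __half2float(__half(hr));
}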
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_FP8_HPP__) +#define __CUDA_FP8_HPP__ + +#if !defined(__CUDA_FP8_H__) +#error "Do not include this file directly. Instead, include cuda_fp8.h." +#endif + +/* C++ header for std::memcpy (used for type punning in host-side + * implementations). When compiling as a CUDA source file memcpy is provided + * implicitly. !defined(__CUDACC__) implies !defined(__CUDACC_RTC__). 
+ */
+#if defined(__cplusplus) && !defined(__CUDACC__)
+#include <cstring>
+#elif !defined(__cplusplus) && !defined(__CUDACC__)
+#include <string.h>
+#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
+
+/* Set up structure-alignment attribute */
+#if !(defined __CUDA_ALIGN__)
+#if defined(__CUDACC__)
+#define __CUDA_ALIGN__(align) __align__(align)
+#else
+/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas"
+ * is available) */
+#if __cplusplus >= 201103L
+#define __CUDA_ALIGN__(n) \
+    alignas(n) /* C++11 kindly gives us a keyword for this */
+#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP8)*/
+#if defined(__GNUC__)
+#define __CUDA_ALIGN__(n) __attribute__((aligned(n)))
+#elif defined(_MSC_VER)
+#define __CUDA_ALIGN__(n) __declspec(align(n))
+#else
+#define __CUDA_ALIGN__(n)
+#endif /* defined(__GNUC__) */
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
+#endif /* defined(__CUDACC__) */
+#endif /* !(defined __CUDA_ALIGN__) */
+
+#if !(defined __CPP_VERSION_AT_LEAST_11_FP8)
+/* need c++11 for explicit operators */
+#define __CUDA_NO_FP8_CONVERSION_OPERATORS__
+#endif
+
+__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
+__nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
+                       const __nv_fp8_interpretation_t fp8_interpretation) {
+    unsigned char res;
+    unsigned long long int xbits;
+
+#if defined(__CUDACC__) || (!defined __cplusplus)
+    (void)memcpy(&xbits, &x, sizeof(x));
+#else
+    (void)std::memcpy(&xbits, &x, sizeof(x));
+#endif
+    unsigned char FP8_MAXNORM;
+    unsigned char FP8_MANTISSA_MASK;
+    unsigned short int FP8_EXP_BIAS;
+    unsigned long long int FP8_SIGNIFICAND_BITS;
+    const unsigned long long int DP_INF_BITS = 0x7FF0000000000000ULL;
+    unsigned long long int FP8_MINDENORM_O2;
+    unsigned long long int FP8_OVERFLOW_THRESHOLD;
+    unsigned long long int FP8_MINNORM;
+
+    if (fp8_interpretation == __NV_E4M3) {
+        FP8_EXP_BIAS = 7U;
+        FP8_SIGNIFICAND_BITS = 4ULL;
+        FP8_MANTISSA_MASK = 0x7U;
+        FP8_MINDENORM_O2 = 0x3F50000000000000ULL; // mindenorm/2 = 2^-10
+        FP8_OVERFLOW_THRESHOLD =
+            0x407D000000000000ULL; // maxnorm + 1/2ulp = 0x1.Cp+8 + 0x1p+4
+        FP8_MAXNORM = 0x7EU;
+        FP8_MINNORM = 0x3F90000000000000ULL; // minnorm = 2^-6
+    } else { //__NV_E5M2
+        FP8_EXP_BIAS = 15U;
+        FP8_SIGNIFICAND_BITS = 3ULL;
+        FP8_MANTISSA_MASK = 0x3U;
+        FP8_MINDENORM_O2 = 0x3EE0000000000000ULL; // mindenorm/2 = 2^-17
+        FP8_OVERFLOW_THRESHOLD =
+            0x40EE000000000000ULL -
+            1ULL; // maxnorm + 1/2ulp = 0x1.Ep+15, and -1 to have common code
+        FP8_MAXNORM = 0x7BU;
+        FP8_MINNORM = 0x3F10000000000000ULL; // minnorm = 2^-14
+    }
+
+    // 1/2 LSB of the target format, positioned in double precision mantissa
+    // helpful in midpoints detection during round-to-nearest-even step
+    const unsigned long long int FP8_DP_HALF_ULP =
+        (unsigned long long int)1ULL << (53ULL - FP8_SIGNIFICAND_BITS - 1ULL);
+    // prepare sign bit in target format
+    unsigned char sign = (unsigned char)((xbits >> 63ULL) << 7U);
+    // prepare exponent field in target format
+    unsigned char exp =
+        (unsigned char)((((unsigned short int)(xbits >> 52ULL)) & 0x7FFU) -
+                        1023U + FP8_EXP_BIAS);
+    // round mantissa to target format width, rounding towards zero
+    unsigned char mantissa =
+        (unsigned char)(xbits >> (53ULL - FP8_SIGNIFICAND_BITS)) &
+        FP8_MANTISSA_MASK;
+    unsigned long long int absx = xbits & 0x7FFFFFFFFFFFFFFFULL;
+
+    if (absx <= FP8_MINDENORM_O2) {
+        // zero or underflow
+        res = 0U;
+    } else if (absx > DP_INF_BITS) {
+        // NaN
+        if (fp8_interpretation == __NV_E4M3) {
+            res = 0x7FU;
+        } else {
+            // NaN --> QNaN
+            res = 0x7EU
| mantissa; + } + } else if (absx > FP8_OVERFLOW_THRESHOLD) { + if (saturate == __NV_SATFINITE) { + res = FP8_MAXNORM; + } else { + // __NV_NOSAT + if (fp8_interpretation == __NV_E4M3) { + // no Inf in E4M3 + res = 0x7FU; // NaN + } else { + res = 0x7CU; // Inf in E5M2 + } + } + } else if (absx >= FP8_MINNORM) { + res = (unsigned char)((exp << (FP8_SIGNIFICAND_BITS - 1U)) | mantissa); + // rounded-off bits + unsigned long long int round = + xbits & ((FP8_DP_HALF_ULP << 1ULL) - 1ULL); + // round-to-nearest-even adjustment + if ((round > FP8_DP_HALF_ULP) || + ((round == FP8_DP_HALF_ULP) && (mantissa & 1U))) { + res = (unsigned char)(res + 1U); + } + } else // Denormal range + { + unsigned char shift = (unsigned char)(1U - exp); + // add implicit leading bit + mantissa |= (unsigned char)(1U << (FP8_SIGNIFICAND_BITS - 1U)); + // additional round-off due to denormalization + res = (unsigned char)(mantissa >> shift); + + // rounded-off bits, including implicit leading bit + unsigned long long int round = + (xbits | ((unsigned long long int)1ULL << (53ULL - 1ULL))) & + ((FP8_DP_HALF_ULP << (shift + 1ULL)) - 1ULL); + // round-to-nearest-even adjustment + if ((round > (FP8_DP_HALF_ULP << shift)) || + ((round == (FP8_DP_HALF_ULP << shift)) && (res & 1U))) { + res = (unsigned char)(res + 1U); + } + } + + res |= sign; + + return (__nv_fp8_storage_t)res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8x2_storage_t storage = (__nv_fp8x2_storage_t)__nv_cvt_double_to_fp8( + x.y, saturate, fp8_interpretation); + storage = (__nv_fp8x2_storage_t)(storage << 8U); + storage = (__nv_fp8x2_storage_t)(storage | + __nv_cvt_double_to_fp8( + x.x, saturate, fp8_interpretation)); + return storage; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8_storage_t res = 0U; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + __nv_fp8x2_storage_t storage; + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n" + : "=h"(storage) + : "f"(x), "f"(0.0f)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n" + : "=h"(storage) + : "f"(x), "f"(0.0f)); + } + res = (__nv_fp8_storage_t)storage; + } else +#endif + { + unsigned int xbits; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&xbits, &x, sizeof(x)); +#else + (void)std::memcpy(&xbits, &x, sizeof(x)); +#endif + + // isnan + if ((xbits & 0x7FFFFFFFU) > 0x7F800000U) { + // Canonical NaN + xbits = 0x7FFFFFFFU; + } + + float fx; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&fx, &xbits, sizeof(xbits)); +#else + (void)std::memcpy(&fx, &xbits, sizeof(xbits)); +#endif + + const double dx = (double)fx; + res = __nv_cvt_double_to_fp8(dx, saturate, fp8_interpretation); + } + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8x2_storage_t storage; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n" + : "=h"(storage) + : "f"(x.x), "f"(x.y)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f32 %0, 
%2, %1;}\n" + : "=h"(storage) + : "f"(x.x), "f"(x.y)); + } + } else +#endif + { + storage = (__nv_fp8x2_storage_t)__nv_cvt_float_to_fp8( + x.y, saturate, fp8_interpretation); + storage = (__nv_fp8x2_storage_t)(storage << 8U); + storage = (__nv_fp8x2_storage_t)(storage | __nv_cvt_float_to_fp8( + x.x, saturate, + fp8_interpretation)); + } + return storage; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ float +__internal_halfraw_to_float(const __half_raw x) { + float f; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) + asm("{cvt.f32.f16 %0, %1;}\n" : "=f"(f) : "h"(x.x)); +#else + const unsigned int ux = (unsigned int)x.x; + unsigned int sign = (ux >> 15U) & 1U; + unsigned int exponent = (ux >> 10U) & 0x1fU; + unsigned int mantissa = (ux & 0x3ffU) << 13U; + if (exponent == 0x1fU) { /* NaN or Inf */ + /* discard sign of a NaN */ + sign = ((mantissa != 0U) ? (sign >> 1U) : sign); + mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U); + exponent = 0xffU; + } else if (exponent == 0U) { /* Denorm or Zero */ + if (mantissa != 0U) { + unsigned int msb; + exponent = 0x71U; + do { + msb = (mantissa & 0x400000U); + mantissa <<= 1U; /* normalize */ + --exponent; + } while (msb == 0U); + mantissa &= 0x7fffffU; /* 1.mantissa is implicit */ + } + } else { + exponent += 0x70U; + } + const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa); +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&f, &u, sizeof(u)); +#else + (void)std::memcpy(&f, &u, sizeof(u)); +#endif +#endif /* (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) */ + return f; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ float2 +__internal_halfraw2_to_float2(const __half2_raw x) { + __half_raw raw; + float2 res; + raw.x = x.x; + res.x = __internal_halfraw_to_float(raw); + raw.x = x.y; + res.y = __internal_halfraw_to_float(raw); + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8_storage_t res = 0U; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + unsigned int half2_storage = (unsigned int)(x.x); + __nv_fp8x2_storage_t tmp; + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } + res = (__nv_fp8_storage_t)tmp; + } else +#endif + { + float fx = __internal_halfraw_to_float(x); + res = __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation); + } + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2( + const __half2_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8x2_storage_t tmp; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + unsigned int half2_storage; + (void)memcpy(&half2_storage, &x, sizeof(x)); + + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } + } else +#endif + { + __half_raw raw; + raw.x = x.x; + __nv_fp8_storage_t lo = + __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation); + raw.x = x.y; + __nv_fp8_storage_t hi = + __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation); + tmp = hi; + tmp = (__nv_fp8x2_storage_t)(tmp << 
8U); + tmp = (__nv_fp8x2_storage_t)(tmp | lo); + } + return tmp; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ float +__internal_bf16raw_to_float(const __nv_bfloat16_raw x) { + const unsigned int ux = ((unsigned int)x.x) << 16U; + float fx; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&fx, &ux, sizeof(ux)); +#else + (void)std::memcpy(&fx, &ux, sizeof(ux)); +#endif + return fx; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_bfloat16_raw +__internal_float_to_bf16raw_rz(const float x) { + unsigned int ux; + __nv_bfloat16_raw r; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&ux, &x, sizeof(x)); +#else + (void)std::memcpy(&ux, &x, sizeof(x)); +#endif + r.x = (unsigned short int)(ux >> 16U); + return r; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8( + const __nv_bfloat16_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + const float fx = __internal_bf16raw_to_float(x); + const __nv_fp8_storage_t res = + __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation); + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_bfloat16raw2_to_fp8x2( + const __nv_bfloat162_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_bfloat16_raw raw; + raw.x = x.y; + __nv_fp8x2_storage_t storage = + (__nv_fp8x2_storage_t)__nv_cvt_bfloat16raw_to_fp8(raw, saturate, + fp8_interpretation); + storage = (__nv_fp8x2_storage_t)(storage << 8U); + raw.x = x.x; + storage = (__nv_fp8x2_storage_t)(storage | + __nv_cvt_bfloat16raw_to_fp8( + raw, saturate, fp8_interpretation)); + return storage; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw +__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation); +__CUDA_HOSTDEVICE_FP8_DECL__ __half_raw +__nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation) { + __half_raw res; + res.x = 0U; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + res.x = + __nv_cvt_fp8x2_to_halfraw2((__nv_fp8x2_storage_t)x, fp8_interpretation) + .x; +#else + unsigned short int ur = (unsigned short int)x; + ur = (unsigned short int)(ur << 8U); + + if (fp8_interpretation == __NV_E5M2) { + if ((ur & 0x7FFFU) > 0x7C00U) { + /* If NaN, return canonical NaN */ + ur = 0x7FFFU; + } + } else { // __NV_E4M3 + unsigned short int sign = ur & 0x8000U; + unsigned short int exponent = + (unsigned short int)(((ur & 0x7800U) >> 1U) + 0x2000U); + unsigned short int mantissa = (ur & 0x0700U) >> 1U; + unsigned char absx = 0x7FU & (unsigned char)x; + + if (absx == 0x7FU) // NaN + { + ur = 0x7FFFU; // fp16 canonical NaN, discard sign + } else if (exponent == 0x2000U) { + // zero or denormal + if (mantissa != 0U) { + // normalize + mantissa = (unsigned short int)(mantissa << 1U); + while ((mantissa & 0x0400U) == 0U) { + mantissa = (unsigned short int)(mantissa << 1U); + exponent = (unsigned short int)(exponent - 0x0400U); + } + // discard implicit leading bit + mantissa &= 0x03FFU; + } else { // Zero + exponent = 0U; + } + + ur = (sign | exponent) | mantissa; + } else { + ur = (sign | exponent) | mantissa; + } + } + res.x = ur; +#endif + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw +__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation) { + __half2_raw res; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + unsigned int half2_storage; + if (fp8_interpretation == __NV_E5M2) { + 
asm("{cvt.rn.f16x2.e5m2x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x)); + } else { + asm("{cvt.rn.f16x2.e4m3x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x)); + } + (void)memcpy(&res, &half2_storage, sizeof(half2_storage)); +#else + res.x = + __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)x, fp8_interpretation).x; + res.y = __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)(x >> 8U), + fp8_interpretation) + .x; +#endif + return res; +} + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/** + * \defgroup CUDA_MATH_FP8_E5M2_STRUCT C++ struct for handling fp8 data type of e5m2 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * \brief __nv_fp8_e5m2 datatype + * + * \details This structure implements the datatype for handling + * \p fp8 floating-point numbers of \p e5m2 kind: + * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(1) __nv_fp8_e5m2 { + public: + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Storage variable contains the \p fp8 floating-point data. + */ + __nv_fp8_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8_e5m2() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider FP types */ + /* Note we do avoid constructor init-list because of special host/device + * compilation rules */ + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p __half data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __half f) { + __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __nv_bfloat16 f) { + __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior + * for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const float f) { + __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p double data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const double f) { + __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E5M2); + } + + /* Converts from integral */ + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p unsigned \p short \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e5m2(const unsigned short int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p unsigned \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. 
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const unsigned int val) {
+        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Constructor from \p unsigned \p long \p long \p int data type, relies on
+     * \p __NV_SATFINITE behavior for out-of-range values.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__
+    __nv_fp8_e5m2(const unsigned long long int val) {
+        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
+    }
+
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Constructor from \p short \p int data type.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const short int val) {
+        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
+     * for out-of-range values.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const int val) {
+        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Constructor from \p long \p long \p int data type, relies on \p
+     * __NV_SATFINITE behavior for out-of-range values.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const long long int val) {
+        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
+    }
+
+#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
+    /* Widening FP converts */
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Conversion operator to \p __half data type.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
+        return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Conversion operator to \p float data type.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
+        return __internal_halfraw_to_float(
+            __nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Conversion operator to \p __nv_bfloat16 data type.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
+        return static_cast<__nv_bfloat16>(
+            __internal_float_to_bf16raw_rz(float(*this)));
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Conversion operator to \p double data type.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
+        return static_cast<double>(float(*this));
+    }
+
+    /* Convert to integral */
+
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Conversion operator to \p unsigned \p char data type.
+     * Clamps negative and too large inputs to the output range.
+     * \p NaN inputs convert to \p zero.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
+        unsigned char i;
+        const float f = float(*this);
+        const unsigned char max_val = 0xFFU;
+        const unsigned char min_val = 0U;
+        const unsigned char bits = (*this).__x;
+        // saturation fixup
+        if ((bits & 0x7FU) > 0x7CU) {
+            // NaN
+            i = 0;
+        } else if (f > static_cast<float>(max_val)) {
+            // saturate maximum
+            i = max_val;
+        } else if (f < static_cast<float>(min_val)) {
+            // saturate minimum
+            i = min_val;
+        } else {
+            // normal value
+            i = static_cast<unsigned char>(f);
+        }
+        return i;
+    }
+    /**
+     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
+     * Conversion operator to \p unsigned \p short \p int data type.
+     * Clamps negative and too large inputs to the output range.
+     * \p NaN inputs convert to \p zero.
+     */
+    explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
+        return __half2ushort_rz(__half(*this));
+    }
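The constructors and operators above give __nv_fp8_e5m2 ordinary value semantics: construction narrows with round-to-nearest-even under __NV_SATFINITE, the widening operators go back through half precision, and the integral operators clamp to the target range, with NaN mapping to zero for the narrow types. A hedged sketch follows; the function names and sample values are assumptions, and the quoted 57344 is e5m2's largest finite value.

#include <cuda_fp8.h>

// Sketch only: quantize a float to e5m2 and read it back.
float roundtrip_e5m2(float x)
{
    const __nv_fp8_e5m2 q(x);     // explicit ctor: RN rounding, __NV_SATFINITE
    return static_cast<float>(q); // explicit operator float()
}

// Sketch only: integral conversions follow the clamping rules documented above.
void e5m2_to_integers(void)
{
    const __nv_fp8_e5m2 big(1.0e6f); // saturates to +MAXNORM (57344.0)
    const __nv_fp8_e5m2 neg(-2.5f);
    const unsigned char u = static_cast<unsigned char>(big); // clamps to 255
    const unsigned char z = static_cast<unsigned char>(neg); // negative -> 0
    const int i = static_cast<int>(neg);                     // truncates to -2
    (void)u; (void)z; (void)i;
}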
+ * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const { + return __half2uint_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p unsigned \p long \p long \p int data type. + * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p 0x8000000000000000ULL. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const { + return __half2ull_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p signed \p char data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const { + signed char i; + const float f = float(*this); + const signed char max_val = (signed char)0x7FU; + const signed char min_val = (signed char)0x80U; + const unsigned char bits = (*this).__x; + // saturation fixup + if ((bits & 0x7FU) > 0x7CU) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value + i = static_cast(f); + } + return i; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p short \p int data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const { + return __half2short_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p int data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator int() const { + return __half2int_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p long \p long \p int data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p 0x8000000000000000LL. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const { + return __half2ll_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p bool data type. + * +0 and -0 inputs convert to \p false. + * Non-zero inputs convert to \p true. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const { + return (__x & 0x7FU) != 0U; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8X2_E5M2_STRUCT C++ struct for handling vector type of two fp8 values of e5m2 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * \brief __nv_fp8x2_e5m2 datatype + * + * \details This structure implements the datatype for handling two + * \p fp8 floating-point numbers of \p e5m2 kind each: + * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(2) __nv_fp8x2_e5m2 { + public: + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Storage variable contains the vector of two \p fp8 floating-point data + * values. + */ + __nv_fp8x2_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor by default. 
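+ *
+ * As an illustration of the scalar \p __nv_fp8_e5m2 type above, a minimal
+ * round-trip sketch (hypothetical input values; fp8-capable toolkit assumed,
+ * not a definitive usage pattern):
+ * \code
+ * __nv_fp8_e5m2 a(0.5f);                 // float -> fp8, __NV_SATFINITE for out-of-range
+ * float  back    = static_cast<float>(a);  // fp8 -> float via the halfraw path
+ * double wide    = static_cast<double>(a);
+ * bool   nonzero = static_cast<bool>(a);   // false only for +0 / -0
+ * \endcode
+ *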
+ */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x2_e5m2() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __half2 f) { + __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __nv_bfloat162 f) { + __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p float2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const float2 f) { + __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p double2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const double2 f) { + __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Conversion operator to \p __half2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const { + return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2)); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Conversion operator to \p float2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const { + return __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2)); + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +__CUDA_HOSTDEVICE_FP8_DECL__ unsigned int +__internal_pack_u16x2_to_u32(const unsigned short int src_lo, + const unsigned short int src_hi) { + unsigned int dst; +#if (defined __CUDACC__) && (defined __CUDA_ARCH__) + asm("{ mov.b32 %0, {%1,%2};}\n" : "=r"(dst) : "h"(src_lo), "h"(src_hi)); +#else + dst = (static_cast(src_hi) << 16U) | + static_cast(src_lo); +#endif + return dst; +} + +/** + * \defgroup CUDA_MATH_FP8X4_E5M2_STRUCT C++ struct for handling vector type of four fp8 values of e5m2 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * \brief __nv_fp8x4_e5m2 datatype + * + * \details This structure implements the datatype for handling four + * \p fp8 floating-point numbers of \p e5m2 kind each: + * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(4) __nv_fp8x4_e5m2 { + public: + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Storage variable contains the vector of four \p fp8 floating-point data + * values. + */ + __nv_fp8x4_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor by default. 
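+ *
+ * For the two-element vector type \p __nv_fp8x2_e5m2 above, a small
+ * conversion sketch (hypothetical inputs; both lanes are converted with
+ * \p __NV_SATFINITE handling of out-of-range values):
+ * \code
+ * float2 in = {1.0f, -2.0f};              // example inputs
+ * __nv_fp8x2_e5m2 packed(in);             // two fp8 values in one 16-bit word
+ * float2 out = static_cast<float2>(packed);
+ * \endcode
+ *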
+ */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x4_e5m2() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from a pair of \p __half2 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __half2 flo, + const __half2 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from a pair of \p __nv_bfloat162 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __nv_bfloat162 flo, + const __nv_bfloat162 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from \p float4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const float4 f) { + const float2 flo = {f.x, f.y}; + const float2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from \p double4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const double4 f) { + const double2 flo = {f.x, f.y}; + const double2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Conversion operator to \p float4 vector data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const { + const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x); + const __nv_fp8x2_storage_t shi = + static_cast<__nv_fp8x2_storage_t>(__x >> 16U); + float2 rlo = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E5M2)); + float2 rhi = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E5M2)); + float4 res = {rlo.x, rlo.y, rhi.x, rhi.y}; + return res; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8_E4M3_STRUCT C++ struct for handling fp8 data type of e4m3 kind. 
+ * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * \brief __nv_fp8_e4m3 datatype + * + * \details This structure implements the datatype for storing + * \p fp8 floating-point numbers of \p e4m3 kind: + * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits. + * The encoding doesn't support Infinity. + * NaNs are limited to 0x7F and 0xFF values. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(1) __nv_fp8_e4m3 { + public: + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Storage variable contains the \p fp8 floating-point data. + */ + __nv_fp8_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8_e4m3() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider FP types */ + /* Note we do avoid constructor init-list because of special host/device + * compilation rules */ + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p __half data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __half f) { + __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __nv_bfloat16 f) { + __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior + * for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const float f) { + __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p double data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const double f) { + __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E4M3); + } + + /* Converts from integral */ + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p unsigned \p short \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e4m3(const unsigned short int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p unsigned \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const unsigned int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p unsigned \p long \p long \p int data type, relies on + * \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e4m3(const unsigned long long int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p short \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. 
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const short int val) {
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
+ * for out-of-range values.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const int val) {
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Constructor from \p long \p long \p int data type, relies on \p
+ * __NV_SATFINITE behavior for out-of-range values.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const long long int val) {
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
+ }
+
+#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
+ /* Widening FP converts */
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p __half data type.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p float data type.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
+ return __internal_halfraw_to_float(
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p __nv_bfloat16 data type.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
+ return static_cast<__nv_bfloat16>(
+ __internal_float_to_bf16raw_rz(float(*this)));
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p double data type.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
+ return static_cast<double>(float(*this));
+ }
+
+ /* Convert to integral */
+
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p unsigned \p char data type.
+ * Clamps negative and too large inputs to the output range.
+ * \p NaN inputs convert to \p zero.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
+ unsigned char i;
+ const float f = float(*this);
+ const unsigned char max_val = 0xFFU;
+ const unsigned char min_val = 0U;
+ const unsigned char bits = (*this).__x;
+ // saturation fixup
+ if ((bits & 0x7FU) == 0x7FU) {
+ // NaN
+ i = 0;
+ } else if (f > static_cast<float>(max_val)) {
+ // saturate maximum
+ i = max_val;
+ } else if (f < static_cast<float>(min_val)) {
+ // saturate minimum
+ i = min_val;
+ } else {
+ // normal value
+ i = static_cast<unsigned char>(f);
+ }
+ return i;
+ }
+
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p unsigned \p short \p int data type.
+ * Clamps negative inputs to zero.
+ * \p NaN inputs convert to \p zero.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
+ return __half2ushort_rz(__half(*this));
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p unsigned \p int data type.
+ * Clamps negative inputs to zero.
+ * \p NaN inputs convert to \p zero.
+ */
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
+ return __half2uint_rz(__half(*this));
+ }
+ /**
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
+ * Clamps negative inputs to zero.
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const { + return __half2ull_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p signed \p char data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const { + signed char i; + const float f = float(*this); + const signed char max_val = (signed char)0x7FU; + const signed char min_val = (signed char)0x80U; + const unsigned char bits = (*this).__x; + // saturation fixup + if ((bits & 0x7FU) == 0x7FU) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value + i = static_cast(f); + } + return i; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p short \p int data type. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const { + return __half2short_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p int data type. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator int() const { + return __half2int_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p long \p long \p int data type. + * \p NaN inputs convert to \p 0x8000000000000000LL. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const { + return __half2ll_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p bool data type. + * +0 and -0 inputs convert to \p false. + * Non-zero inputs convert to \p true. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const { + return (__x & 0x7FU) != 0U; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8X2_E4M3_STRUCT C++ struct for handling vector type of two fp8 values of e4m3 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * \brief __nv_fp8x2_e4m3 datatype + * + * \details This structure implements the datatype for storage + * and operations on the vector of two \p fp8 values of \p e4m3 kind each: + * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits. + * The encoding doesn't support Infinity. + * NaNs are limited to 0x7F and 0xFF values. + */ +struct __CUDA_ALIGN__(2) __nv_fp8x2_e4m3 { + public: + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Storage variable contains the vector of two \p fp8 floating-point data + * values. + */ + __nv_fp8x2_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x2_e4m3() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. 
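+ *
+ * A small sketch of the scalar \p __nv_fp8_e4m3 type above (hypothetical
+ * values; this encoding has no Infinity, so very large inputs saturate to
+ * the maximum finite value under \p __NV_SATFINITE):
+ * \code
+ * __nv_fp8_e4m3 a(1.0e6f);               // saturates to the e4m3 finite maximum
+ * float back = static_cast<float>(a);
+ * __nv_fp8_e4m3 b(0.75);                 // construction from double is also provided
+ * \endcode
+ *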
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __half2 f) { + __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __nv_bfloat162 f) { + __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p float2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const float2 f) { + __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p double2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const double2 f) { + __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Conversion operator to \p __half2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const { + return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3)); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Conversion operator to \p float2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const { + return __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3)); + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8X4_E4M3_STRUCT C++ struct for handling vector type of four fp8 values of e4m3 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * \brief __nv_fp8x4_e4m3 datatype + * + * \details This structure implements the datatype for storage + * and operations on the vector of four \p fp8 values of \p e4m3 kind each: + * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits. + * The encoding doesn't support Infinity. + * NaNs are limited to 0x7F and 0xFF values. + */ +struct __CUDA_ALIGN__(4) __nv_fp8x4_e4m3 { + public: + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Storage variable contains the vector of four \p fp8 floating-point data + * values. + */ + __nv_fp8x4_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x4_e4m3() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from a pair of \p __half2 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. 
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __half2 flo, + const __half2 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from a pair of \p __nv_bfloat162 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __nv_bfloat162 flo, + const __nv_bfloat162 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from \p float4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const float4 f) { + const float2 flo = {f.x, f.y}; + const float2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from \p double4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const double4 f) { + const double2 flo = {f.x, f.y}; + const double2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Conversion operator to \p float4 vector data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const { + const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x); + const __nv_fp8x2_storage_t shi = + static_cast<__nv_fp8x2_storage_t>(__x >> 16U); + float2 rlo = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E4M3)); + float2 rhi = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E4M3)); + float4 res = {rlo.x, rlo.y, rhi.x, rhi.y}; + return res; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +#endif /* defined(__cplusplus) */ + +#endif /* end of include guard: __CUDA_FP8_HPP__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..df64a8afa14f695bb05810266ac40b227c078cc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h @@ -0,0 +1,514 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_GL_INTEROP_H__) +#define __CUDA_GL_INTEROP_H__ + +#include "cuda_runtime_api.h" + +#if defined(__APPLE__) + +#include + +#else /* __APPLE__ */ + +#if defined(__arm__) || defined(__aarch64__) +#ifndef GL_VERSION +#error Please include the appropriate gl headers before including cuda_gl_interop.h +#endif +#else +#include +#endif + +#endif /* __APPLE__ */ + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** + * \addtogroup CUDART_OPENGL OpenGL Interoperability + * This section describes the OpenGL interoperability functions of the CUDA + * runtime application programming interface. 
Note that mapping of OpenGL + * resources is performed with the graphics API agnostic, resource mapping + * interface described in \ref CUDART_INTEROP "Graphics Interopability". + * + * @{ + */ + +/** + * CUDA devices corresponding to the current OpenGL context + */ +enum cudaGLDeviceList +{ + cudaGLDeviceListAll = 1, /**< The CUDA devices for all GPUs used by the current OpenGL context */ + cudaGLDeviceListCurrentFrame = 2, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */ + cudaGLDeviceListNextFrame = 3 /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */ +}; + +/** + * \brief Gets the CUDA devices associated with the current OpenGL context + * + * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices + * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices + * at most \p cudaDeviceCount of the CUDA-compatible devices corresponding to + * the current OpenGL context. If any of the GPUs being used by the current OpenGL + * context are not CUDA capable then the call will return ::cudaErrorNoDevice. + * + * \param pCudaDeviceCount - Returned number of CUDA devices corresponding to the + * current OpenGL context + * \param pCudaDevices - Returned CUDA devices corresponding to the current + * OpenGL context + * \param cudaDeviceCount - The size of the output device array \p pCudaDevices + * \param deviceList - The set of devices to return. This set may be + * ::cudaGLDeviceListAll for all devices, + * ::cudaGLDeviceListCurrentFrame for the devices used to + * render the current frame (in SLI), or + * ::cudaGLDeviceListNextFrame for the devices used to + * render the next frame (in SLI). + * + * \return + * ::cudaSuccess, + * ::cudaErrorNoDevice, + * ::cudaErrorInvalidGraphicsContext, + * ::cudaErrorOperatingSystem, + * ::cudaErrorUnknown + * + * \note This function is not supported on Mac OS X. + * \notefnerr + * + * \sa + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsMapResources, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGLGetDevices + */ +extern __host__ cudaError_t CUDARTAPI cudaGLGetDevices(unsigned int *pCudaDeviceCount, int *pCudaDevices, unsigned int cudaDeviceCount, enum cudaGLDeviceList deviceList); + +/** + * \brief Register an OpenGL texture or renderbuffer object + * + * Registers the texture or renderbuffer object specified by \p image for access by CUDA. + * A handle to the registered object is returned as \p resource. + * + * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D, + * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY, + * or ::GL_RENDERBUFFER. + * + * The register flags \p flags specify the intended usage, as follows: + * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA + * will not write to this resource. + * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * - ::cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will + * bind this resource to a surface reference. 
+ * - ::cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform + * texture gather operations on this resource. + * + * The following image formats are supported. For brevity's sake, the list is abbreviated. + * For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats + * {GL_R8, GL_R16, GL_RG8, GL_RG16} : + * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY + * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I} + * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X + * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT} + * + * The following image classes are currently disallowed: + * - Textures with borders + * - Multisampled renderbuffers + * + * \param resource - Pointer to the returned object handle + * \param image - name of texture or renderbuffer object to be registered + * \param target - Identifies the type of object specified by \p image + * \param flags - Register flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorOperatingSystem, + * ::cudaErrorUnknown + * \notefnerr + * + * \sa + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsMapResources, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cuGraphicsGLRegisterImage + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterImage(struct cudaGraphicsResource **resource, GLuint image, GLenum target, unsigned int flags); + +/** + * \brief Registers an OpenGL buffer object + * + * Registers the buffer object specified by \p buffer for access by + * CUDA. A handle to the registered object is returned as \p + * resource. The register flags \p flags specify the intended usage, + * as follows: + * + * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA + * will not write to this resource. + * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * \param resource - Pointer to the returned object handle + * \param buffer - name of buffer object to be registered + * \param flags - Register flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorOperatingSystem, + * ::cudaErrorUnknown + * \notefnerr + * + * \sa + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsMapResources, + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsGLRegisterBuffer + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterBuffer(struct cudaGraphicsResource **resource, GLuint buffer, unsigned int flags); + +#ifdef _WIN32 +#ifndef WGL_NV_gpu_affinity +typedef void* HGPUNV; +#endif + +/** + * \brief Gets the CUDA device associated with hGpu + * + * Returns the CUDA device associated with a hGpu, if applicable. + * + * \param device - Returns the device associated with hGpu, or -1 if hGpu is + * not a compute device. 
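+ *
+ * Tying the registration entry points above together, a typical interop
+ * flow is sketched below (error handling omitted; \p vbo is a hypothetical
+ * OpenGL buffer object id, not part of this header):
+ * \code
+ * cudaGraphicsResource_t res;
+ * cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsRegisterFlagsWriteDiscard);
+ * cudaGraphicsMapResources(1, &res, 0);
+ * void  *devPtr = NULL;
+ * size_t size   = 0;
+ * cudaGraphicsResourceGetMappedPointer(&devPtr, &size, res);
+ * // ... launch kernels that write through devPtr ...
+ * cudaGraphicsUnmapResources(1, &res, 0);
+ * cudaGraphicsUnregisterResource(res);
+ * \endcode
+ *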
+ * \param hGpu - Handle to a GPU, as queried via WGL_NV_gpu_affinity + * + * \return + * ::cudaSuccess + * \notefnerr + * + * \sa + * ::WGL_NV_gpu_affinity, + * ::cuWGLGetDevice + */ +extern __host__ cudaError_t CUDARTAPI cudaWGLGetDevice(int *device, HGPUNV hGpu); +#endif + +/** @} */ /* END CUDART_OPENGL */ + +/** + * \addtogroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED] + * This section describes deprecated OpenGL interoperability functionality. + * + * @{ + */ + +/** + * CUDA GL Map Flags + */ +enum cudaGLMapFlags +{ + cudaGLMapFlagsNone = 0, /**< Default; Assume resource can be read/written */ + cudaGLMapFlagsReadOnly = 1, /**< CUDA kernels will not write to this resource */ + cudaGLMapFlagsWriteDiscard = 2 /**< CUDA kernels will only write to and will not read from this resource */ +}; + +/** + * \brief Sets a CUDA device to use OpenGL interoperability + * + * \deprecated This function is deprecated as of CUDA 5.0. + * + * This function is deprecated and should no longer be used. It is + * no longer necessary to associate a CUDA device with an OpenGL + * context in order to achieve maximum interoperability performance. + * + * This function will immediately initialize the primary context on + * \p device if needed. + * + * \param device - Device to use for OpenGL interoperability + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorSetOnActiveProcess + * \notefnerr + * + * \sa ::cudaGraphicsGLRegisterBuffer, ::cudaGraphicsGLRegisterImage + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetGLDevice(int device); + +/** + * \brief Registers a buffer object for access by CUDA + * + * \deprecated This function is deprecated as of CUDA 3.0. + * + * Registers the buffer object of ID \p bufObj for access by + * CUDA. This function must be called before CUDA can map the buffer + * object. The OpenGL context used to create the buffer, or another + * context from the same share group, must be bound to the current + * thread when this is called. + * + * \param bufObj - Buffer object ID to register + * + * \return + * ::cudaSuccess, + * ::cudaErrorInitializationError + * \notefnerr + * + * \sa ::cudaGraphicsGLRegisterBuffer + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLRegisterBufferObject(GLuint bufObj); + +/** + * \brief Maps a buffer object for access by CUDA + * + * \deprecated This function is deprecated as of CUDA 3.0. + * + * Maps the buffer object of ID \p bufObj into the address space of + * CUDA and returns in \p *devPtr the base pointer of the resulting + * mapping. The buffer must have previously been registered by + * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped + * by CUDA, any OpenGL operation which references the buffer will + * result in undefined behavior. The OpenGL context used to create + * the buffer, or another context from the same share group, must be + * bound to the current thread when this is called. + * + * All streams in the current thread are synchronized with the current + * GL context. + * + * \param devPtr - Returned device pointer to CUDA object + * \param bufObj - Buffer object ID to map + * + * \return + * ::cudaSuccess, + * ::cudaErrorMapBufferObjectFailed + * \notefnerr + * + * \sa ::cudaGraphicsMapResources + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObject(void **devPtr, GLuint bufObj); + +/** + * \brief Unmaps a buffer object for access by CUDA + * + * \deprecated This function is deprecated as of CUDA 3.0. 
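+ *
+ * For reference only, the legacy flow formed by these deprecated entry
+ * points was roughly as follows (hypothetical buffer id \p vbo; new code
+ * should use the ::cudaGraphicsGLRegisterBuffer path shown earlier):
+ * \code
+ * cudaGLRegisterBufferObject(vbo);
+ * void *devPtr = NULL;
+ * cudaGLMapBufferObject(&devPtr, vbo);
+ * // ... access devPtr from CUDA ...
+ * cudaGLUnmapBufferObject(vbo);
+ * cudaGLUnregisterBufferObject(vbo);
+ * \endcode
+ *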
+ * + * Unmaps the buffer object of ID \p bufObj for access by CUDA. When + * a buffer is unmapped, the base address returned by + * ::cudaGLMapBufferObject() is invalid and subsequent references to + * the address result in undefined behavior. The OpenGL context used + * to create the buffer, or another context from the same share group, + * must be bound to the current thread when this is called. + * + * All streams in the current thread are synchronized with the current + * GL context. + * + * \param bufObj - Buffer object to unmap + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnmapBufferObjectFailed + * \notefnerr + * + * \sa ::cudaGraphicsUnmapResources + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObject(GLuint bufObj); + +/** + * \brief Unregisters a buffer object for access by CUDA + * + * \deprecated This function is deprecated as of CUDA 3.0. + * + * Unregisters the buffer object of ID \p bufObj for access by CUDA + * and releases any CUDA resources associated with the buffer. Once a + * buffer is unregistered, it may no longer be mapped by CUDA. The GL + * context used to create the buffer, or another context from the + * same share group, must be bound to the current thread when this is + * called. + * + * \param bufObj - Buffer object to unregister + * + * \return + * ::cudaSuccess + * \notefnerr + * + * \sa ::cudaGraphicsUnregisterResource + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnregisterBufferObject(GLuint bufObj); + +/** + * \brief Set usage flags for mapping an OpenGL buffer + * + * \deprecated This function is deprecated as of CUDA 3.0. + * + * Set flags for mapping the OpenGL buffer \p bufObj + * + * Changes to flags will take effect the next time \p bufObj is mapped. + * The \p flags argument may be any of the following: + * + * - ::cudaGLMapFlagsNone: Specifies no hints about how this buffer will + * be used. It is therefore assumed that this buffer will be read from and + * written to by CUDA kernels. This is the default value. + * - ::cudaGLMapFlagsReadOnly: Specifies that CUDA kernels which access this + * buffer will not write to the buffer. + * - ::cudaGLMapFlagsWriteDiscard: Specifies that CUDA kernels which access + * this buffer will not read from the buffer and will write over the + * entire contents of the buffer, so none of the data previously stored in + * the buffer will be preserved. + * + * If \p bufObj has not been registered for use with CUDA, then + * ::cudaErrorInvalidResourceHandle is returned. If \p bufObj is presently + * mapped for access by CUDA, then ::cudaErrorUnknown is returned. + * + * \param bufObj - Registered buffer object to set flags for + * \param flags - Parameters for buffer mapping + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * + * \sa ::cudaGraphicsResourceSetMapFlags + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetBufferObjectMapFlags(GLuint bufObj, unsigned int flags); + +/** + * \brief Maps a buffer object for access by CUDA + * + * \deprecated This function is deprecated as of CUDA 3.0. + * + * Maps the buffer object of ID \p bufObj into the address space of + * CUDA and returns in \p *devPtr the base pointer of the resulting + * mapping. The buffer must have previously been registered by + * calling ::cudaGLRegisterBufferObject(). 
While a buffer is mapped + * by CUDA, any OpenGL operation which references the buffer will + * result in undefined behavior. The OpenGL context used to create + * the buffer, or another context from the same share group, must be + * bound to the current thread when this is called. + * + * Stream /p stream is synchronized with the current GL context. + * + * \param devPtr - Returned device pointer to CUDA object + * \param bufObj - Buffer object ID to map + * \param stream - Stream to synchronize + * + * \return + * ::cudaSuccess, + * ::cudaErrorMapBufferObjectFailed + * \notefnerr + * + * \sa ::cudaGraphicsMapResources + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObjectAsync(void **devPtr, GLuint bufObj, cudaStream_t stream); + +/** + * \brief Unmaps a buffer object for access by CUDA + * + * \deprecated This function is deprecated as of CUDA 3.0. + * + * Unmaps the buffer object of ID \p bufObj for access by CUDA. When + * a buffer is unmapped, the base address returned by + * ::cudaGLMapBufferObject() is invalid and subsequent references to + * the address result in undefined behavior. The OpenGL context used + * to create the buffer, or another context from the same share group, + * must be bound to the current thread when this is called. + * + * Stream /p stream is synchronized with the current GL context. + * + * \param bufObj - Buffer object to unmap + * \param stream - Stream to synchronize + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnmapBufferObjectFailed + * \notefnerr + * + * \sa ::cudaGraphicsUnmapResources + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObjectAsync(GLuint bufObj, cudaStream_t stream); + +/** @} */ /* END CUDART_OPENGL_DEPRECATED */ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#undef __CUDA_DEPRECATED + +#endif /* __CUDA_GL_INTEROP_H__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe55709f8ccdebf7341180f043006b68c08e104 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h @@ -0,0 +1,1958 @@ +/* + * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/** + * CUDA Occupancy Calculator + * + * NAME + * + * cudaOccMaxActiveBlocksPerMultiprocessor, + * cudaOccMaxPotentialOccupancyBlockSize, + * cudaOccMaxPotentialOccupancyBlockSizeVariableSMem + * cudaOccAvailableDynamicSMemPerBlock + * + * DESCRIPTION + * + * The CUDA occupancy calculator provides a standalone, programmatical + * interface to compute the occupancy of a function on a device. It can also + * provide occupancy-oriented launch configuration suggestions. + * + * The function and device are defined by the user through + * cudaOccFuncAttributes, cudaOccDeviceProp, and cudaOccDeviceState + * structures. All APIs require all 3 of them. + * + * See the structure definition for more details about the device / function + * descriptors. + * + * See each API's prototype for API usage. + * + * COMPATIBILITY + * + * The occupancy calculator will be updated on each major CUDA toolkit + * release. It does not provide forward compatibility, i.e. new hardwares + * released after this implementation's release will not be supported. + * + * NOTE + * + * If there is access to CUDA runtime, and the sole intent is to calculate + * occupancy related values on one of the accessible CUDA devices, using CUDA + * runtime's occupancy calculation APIs is recommended. 
+ * + */ + +#ifndef __cuda_occupancy_h__ +#define __cuda_occupancy_h__ + +#include +#include +#include + + +// __OCC_INLINE will be undefined at the end of this header +// +#ifdef __CUDACC__ +#define __OCC_INLINE inline __host__ __device__ +#elif defined _MSC_VER +#define __OCC_INLINE __inline +#else // GNUCC assumed +#define __OCC_INLINE inline +#endif + +enum cudaOccError_enum { + CUDA_OCC_SUCCESS = 0, // no error encountered + CUDA_OCC_ERROR_INVALID_INPUT = 1, // input parameter is invalid + CUDA_OCC_ERROR_UNKNOWN_DEVICE = 2, // requested device is not supported in + // current implementation or device is + // invalid +}; +typedef enum cudaOccError_enum cudaOccError; + +typedef struct cudaOccResult cudaOccResult; +typedef struct cudaOccDeviceProp cudaOccDeviceProp; +typedef struct cudaOccFuncAttributes cudaOccFuncAttributes; +typedef struct cudaOccDeviceState cudaOccDeviceState; + +/** + * The CUDA occupancy calculator computes the occupancy of the function + * described by attributes with the given block size (blockSize), static device + * properties (properties), dynamic device states (states) and per-block dynamic + * shared memory allocation (dynamicSMemSize) in bytes, and output it through + * result along with other useful information. The occupancy is computed in + * terms of the maximum number of active blocks per multiprocessor. The user can + * then convert it to other metrics, such as number of active warps. + * + * RETURN VALUE + * + * The occupancy and related information is returned through result. + * + * If result->activeBlocksPerMultiprocessor is 0, then the given parameter + * combination cannot run on the device. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + */ +static __OCC_INLINE +cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor( + cudaOccResult *result, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + int blockSize, // in + size_t dynamicSmemSize); // in + +/** + * The CUDA launch configurator C API suggests a grid / block size pair (in + * minGridSize and blockSize) that achieves the best potential occupancy + * (i.e. maximum number of active warps with the smallest number of blocks) for + * the given function described by attributes, on a device described by + * properties with settings in state. + * + * If per-block dynamic shared memory allocation is not needed, the user should + * leave both blockSizeToDynamicSMemSize and dynamicSMemSize as 0. + * + * If per-block dynamic shared memory allocation is needed, then if the dynamic + * shared memory size is constant regardless of block size, the size should be + * passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be + * NULL. + * + * Otherwise, if the per-block dynamic shared memory size varies with different + * block sizes, the user needs to provide a pointer to an unary function through + * blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by + * a block of the function for any given block size. dynamicSMemSize is + * ignored. 
An example signature is: + * + * // Take block size, returns dynamic shared memory needed + * size_t blockToSmem(int blockSize); + * + * RETURN VALUE + * + * The suggested block size and the minimum number of blocks needed to achieve + * the maximum occupancy are returned through blockSize and minGridSize. + * + * If *blockSize is 0, then the given combination cannot run on the device. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + * + */ +static __OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, // out + int *blockSize, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + size_t (*blockSizeToDynamicSMemSize)(int), // in + size_t dynamicSMemSize); // in + +/** + * The CUDA launch configurator C++ API suggests a grid / block size pair (in + * minGridSize and blockSize) that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number of blocks) + * for the given function described by attributes, on a device described by + * properties with settings in state. + * + * If per-block dynamic shared memory allocation is 0 or constant regardless of + * block size, the user can use cudaOccMaxPotentialOccupancyBlockSize to + * configure the launch. A constant dynamic shared memory allocation size in + * bytes can be passed through dynamicSMemSize. + * + * Otherwise, if the per-block dynamic shared memory size varies with different + * block sizes, the user needs to use + * cudaOccMaxPotentialOccupancyBlockSizeVariableSmem instead, and provide a + * functor / pointer to an unary function (blockSizeToDynamicSMemSize) that + * computes the dynamic shared memory needed by func for any given block + * size. An example signature is: + * + * // Take block size, returns per-block dynamic shared memory needed + * size_t blockToSmem(int blockSize); + * + * RETURN VALUE + * + * The suggested block size and the minimum number of blocks needed to achieve + * the maximum occupancy are returned through blockSize and minGridSize. + * + * If *blockSize is 0, then the given combination cannot run on the device. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + * + */ + +#if defined(__cplusplus) +namespace { + +__OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, // out + int *blockSize, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + size_t dynamicSMemSize = 0); // in + +template +__OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem( + int *minGridSize, // out + int *blockSize, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + UnaryFunction blockSizeToDynamicSMemSize); // in + +} // namespace anonymous +#endif // defined(__cplusplus) + +/** + * + * The CUDA dynamic shared memory calculator computes the maximum size of + * per-block dynamic shared memory if we want to place numBlocks blocks + * on an SM. 
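+ *
+ * Putting the descriptor structures and the occupancy query above together,
+ * a host-side sketch (hypothetical kernel attributes, CUDA runtime access
+ * assumed, error checking omitted) for a block size of 256 threads could be:
+ * \code
+ * cudaDeviceProp prop;
+ * cudaGetDeviceProperties(&prop, 0);        // device 0, for example
+ * cudaOccDeviceProp occProp = prop;         // converting constructor below
+ *
+ * cudaOccFuncAttributes attr = cudaOccFuncAttributes();
+ * attr.maxThreadsPerBlock = 1024;           // hypothetical kernel limits
+ * attr.numRegs            = 32;
+ * attr.sharedSizeBytes    = 0;
+ *
+ * cudaOccDeviceState state = cudaOccDeviceState();  // default device state
+ * cudaOccResult result;
+ *
+ * cudaOccMaxActiveBlocksPerMultiprocessor(&result, &occProp, &attr,
+ *                                         &state, 256, 0);
+ * // result.activeBlocksPerMultiprocessor == 0 means this config cannot run
+ * \endcode
+ *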
+ * + * RETURN VALUE + * + * Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow + * numBlocks blocks per SM. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + * + */ +static __OCC_INLINE +cudaOccError cudaOccAvailableDynamicSMemPerBlock( + size_t *dynamicSmemSize, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int numBlocks, + int blockSize); + +/** + * Data structures + * + * These structures are subject to change for future architecture and CUDA + * releases. C users should initialize the structure as {0}. + * + */ + +/** + * Device descriptor + * + * This structure describes a device. + */ +struct cudaOccDeviceProp { + int computeMajor; // Compute capability major version + int computeMinor; // Compute capability minor + // version. None supported minor version + // may cause error + int maxThreadsPerBlock; // Maximum number of threads per block + int maxThreadsPerMultiprocessor; // Maximum number of threads per SM + // i.e. (Max. number of warps) x (warp + // size) + int regsPerBlock; // Maximum number of registers per block + int regsPerMultiprocessor; // Maximum number of registers per SM + int warpSize; // Warp size + size_t sharedMemPerBlock; // Maximum shared memory size per block + size_t sharedMemPerMultiprocessor; // Maximum shared memory size per SM + int numSms; // Number of SMs available + size_t sharedMemPerBlockOptin; // Maximum optin shared memory size per block + size_t reservedSharedMemPerBlock; // Shared memory per block reserved by driver + +#ifdef __cplusplus + // This structure can be converted from a cudaDeviceProp structure for users + // that use this header in their CUDA applications. + // + // If the application have access to the CUDA Runtime API, the application + // can obtain the device properties of a CUDA device through + // cudaGetDeviceProperties, and initialize a cudaOccDeviceProp with the + // cudaDeviceProp structure. + // + // Example: + /* + { + cudaDeviceProp prop; + + cudaGetDeviceProperties(&prop, ...); + + cudaOccDeviceProp occProp = prop; + + ... 
+
+        cudaOccMaxPotentialOccupancyBlockSize(..., &occProp, ...);
+    }
+ */
+    //
+    template<typename DeviceProp>
+    __OCC_INLINE
+    cudaOccDeviceProp(const DeviceProp &props)
+    :   computeMajor                (props.major),
+        computeMinor                (props.minor),
+        maxThreadsPerBlock          (props.maxThreadsPerBlock),
+        maxThreadsPerMultiprocessor (props.maxThreadsPerMultiProcessor),
+        regsPerBlock                (props.regsPerBlock),
+        regsPerMultiprocessor       (props.regsPerMultiprocessor),
+        warpSize                    (props.warpSize),
+        sharedMemPerBlock           (props.sharedMemPerBlock),
+        sharedMemPerMultiprocessor  (props.sharedMemPerMultiprocessor),
+        numSms                      (props.multiProcessorCount),
+        sharedMemPerBlockOptin      (props.sharedMemPerBlockOptin),
+        reservedSharedMemPerBlock   (props.reservedSharedMemPerBlock)
+    {}
+
+    __OCC_INLINE
+    cudaOccDeviceProp()
+    :   computeMajor                (0),
+        computeMinor                (0),
+        maxThreadsPerBlock          (0),
+        maxThreadsPerMultiprocessor (0),
+        regsPerBlock                (0),
+        regsPerMultiprocessor       (0),
+        warpSize                    (0),
+        sharedMemPerBlock           (0),
+        sharedMemPerMultiprocessor  (0),
+        numSms                      (0),
+        sharedMemPerBlockOptin      (0),
+        reservedSharedMemPerBlock   (0)
+    {}
+#endif // __cplusplus
+};
+
+/**
+ * Partitioned global caching option
+ */
+typedef enum cudaOccPartitionedGCConfig_enum {
+    PARTITIONED_GC_OFF,        // Disable partitioned global caching
+    PARTITIONED_GC_ON,         // Prefer partitioned global caching
+    PARTITIONED_GC_ON_STRICT   // Force partitioned global caching
+} cudaOccPartitionedGCConfig;
+
+/**
+ * Per function opt in maximum dynamic shared memory limit
+ */
+typedef enum cudaOccFuncShmemConfig_enum {
+    FUNC_SHMEM_LIMIT_DEFAULT,   // Default shmem limit
+    FUNC_SHMEM_LIMIT_OPTIN,     // Use the optin shmem limit
+} cudaOccFuncShmemConfig;
+
+/**
+ * Function descriptor
+ *
+ * This structure describes a CUDA function.
+ */
+struct cudaOccFuncAttributes {
+    int maxThreadsPerBlock;  // Maximum block size the function can work with. If
+                             // unlimited, use INT_MAX or any value greater than
+                             // or equal to maxThreadsPerBlock of the device
+    int numRegs;             // Number of registers used. When the function is
+                             // launched on device, the register count may change
+                             // due to internal tools requirements.
+    size_t sharedSizeBytes;  // Amount of static shared memory used
+
+    cudaOccPartitionedGCConfig partitionedGCConfig;
+                             // Partitioned global caching is required to enable
+                             // caching on certain chips, such as sm_52
+                             // devices. Partitioned global caching can be
+                             // automatically disabled if the occupancy
+                             // requirement of the launch cannot support caching.
+                             //
+                             // To override this behavior with caching on and
+                             // calculate occupancy strictly according to the
+                             // preference, set partitionedGCConfig to
+                             // PARTITIONED_GC_ON_STRICT. This is especially
+                             // useful for experimenting and finding launch
+                             // configurations (MaxPotentialOccupancyBlockSize)
+                             // that allow global caching to take effect.
+                             //
+                             // This flag only affects the occupancy calculation.
+
+    cudaOccFuncShmemConfig shmemLimitConfig;
+                             // Certain chips like sm_70 allow a user to opt into
+                             // a higher per block limit of dynamic shared memory.
+                             // This opt-in is performed on a per function basis
+                             // using the cuFuncSetAttribute function
+
+    size_t maxDynamicSharedSizeBytes;
+                             // User set limit on maximum dynamic shared memory
+                             // usable by the kernel.
+                             // This limit is set using the cuFuncSetAttribute
+                             // function.
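+                             //
+                             // For example (illustrative only; the kernel handle
+                             // and the 64 KB value are assumptions, not part of
+                             // this header), the opt-in limit is typically raised
+                             // with the runtime equivalent:
+                             //
+                             //   cudaFuncSetAttribute(kernel,
+                             //       cudaFuncAttributeMaxDynamicSharedMemorySize,
+                             //       64 * 1024);
+                             //
+                             // and the same byte count mirrored into this field.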
+
+    int numBlockBarriers;    // Number of block barriers used (default to 1)
+#ifdef __cplusplus
+    // This structure can be converted from a cudaFuncAttributes structure for
+    // users that use this header in their CUDA applications.
+    //
+    // If the application has access to the CUDA Runtime API, the application
+    // can obtain the function attributes of a CUDA kernel function through
+    // cudaFuncGetAttributes, and initialize a cudaOccFuncAttributes with the
+    // cudaFuncAttributes structure.
+    //
+    // Example:
+    /*
+        __global__ void foo() {...}
+
+        ...
+
+        {
+            cudaFuncAttributes attr;
+
+            cudaFuncGetAttributes(&attr, foo);
+
+            cudaOccFuncAttributes occAttr = attr;
+
+            ...
+
+            cudaOccMaxPotentialOccupancyBlockSize(..., &occAttr, ...);
+        }
+    */
+    //
+    template<typename FuncAttributes>
+    __OCC_INLINE
+    cudaOccFuncAttributes(const FuncAttributes &attr)
+    :   maxThreadsPerBlock  (attr.maxThreadsPerBlock),
+        numRegs             (attr.numRegs),
+        sharedSizeBytes     (attr.sharedSizeBytes),
+        partitionedGCConfig (PARTITIONED_GC_OFF),
+        shmemLimitConfig    (FUNC_SHMEM_LIMIT_OPTIN),
+        maxDynamicSharedSizeBytes (attr.maxDynamicSharedSizeBytes),
+        numBlockBarriers    (1)
+    {}
+
+    __OCC_INLINE
+    cudaOccFuncAttributes()
+    :   maxThreadsPerBlock  (0),
+        numRegs             (0),
+        sharedSizeBytes     (0),
+        partitionedGCConfig (PARTITIONED_GC_OFF),
+        shmemLimitConfig    (FUNC_SHMEM_LIMIT_DEFAULT),
+        maxDynamicSharedSizeBytes (0),
+        numBlockBarriers    (0)
+    {}
+#endif
+};
+
+typedef enum cudaOccCacheConfig_enum {
+    CACHE_PREFER_NONE   = 0x00, // no preference for shared memory or L1 (default)
+    CACHE_PREFER_SHARED = 0x01, // prefer larger shared memory and smaller L1 cache
+    CACHE_PREFER_L1     = 0x02, // prefer larger L1 cache and smaller shared memory
+    CACHE_PREFER_EQUAL  = 0x03  // prefer equal sized L1 cache and shared memory
+} cudaOccCacheConfig;
+
+typedef enum cudaOccCarveoutConfig_enum {
+    SHAREDMEM_CARVEOUT_DEFAULT    = -1,  // no preference for shared memory or L1 (default)
+    SHAREDMEM_CARVEOUT_MAX_SHARED = 100, // prefer maximum available shared memory, minimum L1 cache
+    SHAREDMEM_CARVEOUT_MAX_L1     = 0,   // prefer maximum available L1 cache, minimum shared memory
+    SHAREDMEM_CARVEOUT_HALF       = 50   // prefer half of maximum available shared memory, with the rest as L1 cache
+} cudaOccCarveoutConfig;
+
+/**
+ * Device state descriptor
+ *
+ * This structure describes device settings that affect occupancy calculation.
+ */
+struct cudaOccDeviceState
+{
+    // Cache / shared memory split preference. Deprecated on Volta
+    cudaOccCacheConfig cacheConfig;
+    // Shared memory / L1 split preference. Supported only on Volta+
+    int carveoutConfig;
+
+#ifdef __cplusplus
+    __OCC_INLINE
+    cudaOccDeviceState()
+    :   cacheConfig    (CACHE_PREFER_NONE),
+        carveoutConfig (SHAREDMEM_CARVEOUT_DEFAULT)
+    {}
+#endif
+};
+
+typedef enum cudaOccLimitingFactor_enum {
+                                    // Occupancy limited due to:
+    OCC_LIMIT_WARPS         = 0x01, // - warps available
+    OCC_LIMIT_REGISTERS     = 0x02, // - registers available
+    OCC_LIMIT_SHARED_MEMORY = 0x04, // - shared memory available
+    OCC_LIMIT_BLOCKS        = 0x08, // - blocks available
+    OCC_LIMIT_BARRIERS      = 0x10  // - barriers available
+} cudaOccLimitingFactor;
+
+/**
+ * Occupancy output
+ *
+ * This structure contains the occupancy calculator's output.
+ */
+struct cudaOccResult {
+    int activeBlocksPerMultiprocessor; // Occupancy
+    unsigned int limitingFactors;      // Factors that limited occupancy. A bit
+                                       // field that counts the limiting
+                                       // factors, see cudaOccLimitingFactor
+    int blockLimitRegs;                // Occupancy due to register
+                                       // usage, INT_MAX if the kernel does not
+                                       // use any register.
+ int blockLimitSharedMem; // Occupancy due to shared memory + // usage, INT_MAX if the kernel does not + // use shared memory. + int blockLimitWarps; // Occupancy due to block size limit + int blockLimitBlocks; // Occupancy due to maximum number of blocks + // managable per SM + int blockLimitBarriers; // Occupancy due to block barrier usage + int allocatedRegistersPerBlock; // Actual number of registers allocated per + // block + size_t allocatedSharedMemPerBlock; // Actual size of shared memory allocated + // per block + cudaOccPartitionedGCConfig partitionedGCConfig; + // Report if partitioned global caching + // is actually enabled. +}; + +/** + * Partitioned global caching support + * + * See cudaOccPartitionedGlobalCachingModeSupport + */ +typedef enum cudaOccPartitionedGCSupport_enum { + PARTITIONED_GC_NOT_SUPPORTED, // Partitioned global caching is not supported + PARTITIONED_GC_SUPPORTED, // Partitioned global caching is supported +} cudaOccPartitionedGCSupport; + +/** + * Implementation + */ + +/** + * Max compute capability supported + */ +#define __CUDA_OCC_MAJOR__ 9 +#define __CUDA_OCC_MINOR__ 0 + +////////////////////////////////////////// +// Mathematical Helper Functions // +////////////////////////////////////////// + +static __OCC_INLINE int __occMin(int lhs, int rhs) +{ + return rhs < lhs ? rhs : lhs; +} + +static __OCC_INLINE int __occDivideRoundUp(int x, int y) +{ + return (x + (y - 1)) / y; +} + +static __OCC_INLINE int __occRoundUp(int x, int y) +{ + return y * __occDivideRoundUp(x, y); +} + +////////////////////////////////////////// +// Architectural Properties // +////////////////////////////////////////// + +/** + * Granularity of shared memory allocation + */ +static __OCC_INLINE cudaOccError cudaOccSMemAllocationGranularity(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 6: + case 7: + value = 256; + break; + case 8: + case 9: + value = 128; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Maximum number of registers per thread + */ +static __OCC_INLINE cudaOccError cudaOccRegAllocationMaxPerThread(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 6: + value = 255; + break; + case 7: + case 8: + case 9: + value = 256; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Granularity of register allocation + */ +static __OCC_INLINE cudaOccError cudaOccRegAllocationGranularity(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 6: + case 7: + case 8: + case 9: + value = 256; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Number of sub-partitions + */ +static __OCC_INLINE cudaOccError cudaOccSubPartitionsPerMultiprocessor(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 7: + case 8: + case 9: + value = 4; + break; + case 6: + value = properties->computeMinor ? 
4 : 2; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + + +/** + * Maximum number of blocks that can run simultaneously on a multiprocessor + */ +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerMultiprocessor(int* limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + value = 16; + break; + case 5: + case 6: + value = 32; + break; + case 7: { + int isTuring = properties->computeMinor == 5; + value = (isTuring) ? 16 : 32; + break; + } + case 8: + if (properties->computeMinor == 0) { + value = 32; + } + else if (properties->computeMinor == 9) { + value = 24; + } + else { + value = 16; + } + break; + case 9: + value = 32; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Align up shared memory based on compute major configurations + */ +static __OCC_INLINE cudaOccError cudaOccAlignUpShmemSizeVoltaPlus(size_t *shMemSize, const cudaOccDeviceProp *properties) +{ + // Volta and Turing have shared L1 cache / shared memory, and support cache + // configuration to trade one for the other. These values are needed to + // map carveout config ratio to the next available architecture size + size_t size = *shMemSize; + + switch (properties->computeMajor) { + case 7: { + // Turing supports 32KB and 64KB shared mem. + int isTuring = properties->computeMinor == 5; + if (isTuring) { + if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + // Volta supports 0KB, 8KB, 16KB, 32KB, 64KB, and 96KB shared mem. + else { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 96 * 1024) { + *shMemSize = 96 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + break; + } + case 8: + if (properties->computeMinor == 0 || properties->computeMinor == 7) { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 100 * 1024) { + *shMemSize = 100 * 1024; + } + else if (size <= 132 * 1024) { + *shMemSize = 132 * 1024; + } + else if (size <= 164 * 1024) { + *shMemSize = 164 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + else { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 100 * 1024) { + *shMemSize = 100 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + break; + case 9: { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 100 * 1024) { + *shMemSize = 100 
* 1024; + } + else if (size <= 132 * 1024) { + *shMemSize = 132 * 1024; + } + else if (size <= 164 * 1024) { + *shMemSize = 164 * 1024; + } + else if (size <= 196 * 1024) { + *shMemSize = 196 * 1024; + } + else if (size <= 228 * 1024) { + *shMemSize = 228 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + break; + } + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + return CUDA_OCC_SUCCESS; +} + +/** + * Shared memory based on the new carveoutConfig API introduced with Volta + */ +static __OCC_INLINE cudaOccError cudaOccSMemPreferenceVoltaPlus(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + size_t preferenceShmemSize; + + // CUDA 9.0 introduces a new API to set shared memory - L1 configuration on supported + // devices. This preference will take precedence over the older cacheConfig setting. + // Map cacheConfig to its effective preference value. + int effectivePreference = state->carveoutConfig; + if ((effectivePreference < SHAREDMEM_CARVEOUT_DEFAULT) || (effectivePreference > SHAREDMEM_CARVEOUT_MAX_SHARED)) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) { + switch (state->cacheConfig) + { + case CACHE_PREFER_L1: + effectivePreference = SHAREDMEM_CARVEOUT_MAX_L1; + break; + case CACHE_PREFER_SHARED: + effectivePreference = SHAREDMEM_CARVEOUT_MAX_SHARED; + break; + case CACHE_PREFER_EQUAL: + effectivePreference = SHAREDMEM_CARVEOUT_HALF; + break; + default: + effectivePreference = SHAREDMEM_CARVEOUT_DEFAULT; + break; + } + } + + if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) { + preferenceShmemSize = properties->sharedMemPerMultiprocessor; + } + else { + preferenceShmemSize = (size_t) (effectivePreference * properties->sharedMemPerMultiprocessor) / 100; + } + + status = cudaOccAlignUpShmemSizeVoltaPlus(&preferenceShmemSize, properties); + *limit = preferenceShmemSize; + return status; +} + +/** + * Shared memory based on the cacheConfig + */ +static __OCC_INLINE cudaOccError cudaOccSMemPreference(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state) +{ + size_t bytes = 0; + size_t sharedMemPerMultiprocessorHigh = properties->sharedMemPerMultiprocessor; + cudaOccCacheConfig cacheConfig = state->cacheConfig; + + // Kepler has shared L1 cache / shared memory, and support cache + // configuration to trade one for the other. These values are needed to + // calculate the correct shared memory size for user requested cache + // configuration. + // + size_t minCacheSize = 16384; + size_t maxCacheSize = 49152; + size_t cacheAndSharedTotal = sharedMemPerMultiprocessorHigh + minCacheSize; + size_t sharedMemPerMultiprocessorLow = cacheAndSharedTotal - maxCacheSize; + + switch (properties->computeMajor) { + case 3: + // Kepler supports 16KB, 32KB, or 48KB partitions for L1. The rest + // is shared memory. + // + switch (cacheConfig) { + default : + case CACHE_PREFER_NONE: + case CACHE_PREFER_SHARED: + bytes = sharedMemPerMultiprocessorHigh; + break; + case CACHE_PREFER_L1: + bytes = sharedMemPerMultiprocessorLow; + break; + case CACHE_PREFER_EQUAL: + // Equal is the mid-point between high and low. It should be + // equivalent to low + 16KB. + // + bytes = (sharedMemPerMultiprocessorHigh + sharedMemPerMultiprocessorLow) / 2; + break; + } + break; + case 5: + case 6: + // Maxwell and Pascal have dedicated shared memory. 
+ // + bytes = sharedMemPerMultiprocessorHigh; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = bytes; + + return CUDA_OCC_SUCCESS; +} + +/** + * Shared memory based on config requested by User + */ +static __OCC_INLINE cudaOccError cudaOccSMemPerMultiprocessor(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state) +{ + // Volta introduces a new API that allows for shared memory carveout preference. Because it is a shared memory preference, + // it is handled separately from the cache config preference. + if (properties->computeMajor >= 7) { + return cudaOccSMemPreferenceVoltaPlus(limit, properties, state); + } + return cudaOccSMemPreference(limit, properties, state); +} + +/** + * Return the per block shared memory limit based on function config + */ +static __OCC_INLINE cudaOccError cudaOccSMemPerBlock(size_t *limit, const cudaOccDeviceProp *properties, cudaOccFuncShmemConfig shmemLimitConfig, size_t smemPerCta) +{ + switch (properties->computeMajor) { + case 2: + case 3: + case 4: + case 5: + case 6: + *limit = properties->sharedMemPerBlock; + break; + case 7: + case 8: + case 9: + switch (shmemLimitConfig) { + default: + case FUNC_SHMEM_LIMIT_DEFAULT: + *limit = properties->sharedMemPerBlock; + break; + case FUNC_SHMEM_LIMIT_OPTIN: + if (smemPerCta > properties->sharedMemPerBlock) { + *limit = properties->sharedMemPerBlockOptin; + } + else { + *limit = properties->sharedMemPerBlock; + } + break; + } + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + // Starting Ampere, CUDA driver reserves additional shared memory per block + if (properties->computeMajor >= 8) { + *limit += properties->reservedSharedMemPerBlock; + } + + return CUDA_OCC_SUCCESS; +} + +/** + * Partitioned global caching mode support + */ +static __OCC_INLINE cudaOccError cudaOccPartitionedGlobalCachingModeSupport(cudaOccPartitionedGCSupport *limit, const cudaOccDeviceProp *properties) +{ + *limit = PARTITIONED_GC_NOT_SUPPORTED; + + if ((properties->computeMajor == 5 && (properties->computeMinor == 2 || properties->computeMinor == 3)) || + properties->computeMajor == 6) { + *limit = PARTITIONED_GC_SUPPORTED; + } + + if (properties->computeMajor == 6 && properties->computeMinor == 0) { + *limit = PARTITIONED_GC_NOT_SUPPORTED; + } + + return CUDA_OCC_SUCCESS; +} + +/////////////////////////////////////////////// +// User Input Sanity // +/////////////////////////////////////////////// + +static __OCC_INLINE cudaOccError cudaOccDevicePropCheck(const cudaOccDeviceProp *properties) +{ + // Verify device properties + // + // Each of these limits must be a positive number. + // + // Compute capacity is checked during the occupancy calculation + // + if (properties->maxThreadsPerBlock <= 0 || + properties->maxThreadsPerMultiprocessor <= 0 || + properties->regsPerBlock <= 0 || + properties->regsPerMultiprocessor <= 0 || + properties->warpSize <= 0 || + properties->sharedMemPerBlock <= 0 || + properties->sharedMemPerMultiprocessor <= 0 || + properties->numSms <= 0) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE cudaOccError cudaOccFuncAttributesCheck(const cudaOccFuncAttributes *attributes) +{ + // Verify function attributes + // + if (attributes->maxThreadsPerBlock <= 0 || + attributes->numRegs < 0) { // Compiler may choose not to use + // any register (empty kernels, + // etc.) 
+ return CUDA_OCC_ERROR_INVALID_INPUT; + } + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE cudaOccError cudaOccDeviceStateCheck(const cudaOccDeviceState *state) +{ + (void)state; // silence unused-variable warning + // Placeholder + // + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE cudaOccError cudaOccInputCheck( + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + + status = cudaOccDevicePropCheck(properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccFuncAttributesCheck(attributes); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccDeviceStateCheck(state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + return status; +} + +/////////////////////////////////////////////// +// Occupancy calculation Functions // +/////////////////////////////////////////////// + +static __OCC_INLINE cudaOccPartitionedGCConfig cudaOccPartitionedGCExpected( + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes) +{ + cudaOccPartitionedGCSupport gcSupport; + cudaOccPartitionedGCConfig gcConfig; + + cudaOccPartitionedGlobalCachingModeSupport(&gcSupport, properties); + + gcConfig = attributes->partitionedGCConfig; + + if (gcSupport == PARTITIONED_GC_NOT_SUPPORTED) { + gcConfig = PARTITIONED_GC_OFF; + } + + return gcConfig; +} + +// Warp limit +// +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMWarpsLimit( + int *limit, + cudaOccPartitionedGCConfig gcConfig, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + int blockSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int maxWarpsPerSm; + int warpsAllocatedPerCTA; + int maxBlocks; + (void)attributes; // silence unused-variable warning + + if (blockSize > properties->maxThreadsPerBlock) { + maxBlocks = 0; + } + else { + maxWarpsPerSm = properties->maxThreadsPerMultiprocessor / properties->warpSize; + warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize); + maxBlocks = 0; + + if (gcConfig != PARTITIONED_GC_OFF) { + int maxBlocksPerSmPartition; + int maxWarpsPerSmPartition; + + // If partitioned global caching is on, then a CTA can only use a SM + // partition (a half SM), and thus a half of the warp slots + // available per SM + // + maxWarpsPerSmPartition = maxWarpsPerSm / 2; + maxBlocksPerSmPartition = maxWarpsPerSmPartition / warpsAllocatedPerCTA; + maxBlocks = maxBlocksPerSmPartition * 2; + } + // On hardware that supports partitioned global caching, each half SM is + // guaranteed to support at least 32 warps (maximum number of warps of a + // CTA), so caching will not cause 0 occupancy due to insufficient warp + // allocation slots. 
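+        // As a worked illustration (the numbers are assumed for this example,
+        // not taken from any particular device): with
+        // maxThreadsPerMultiprocessor = 2048 and warpSize = 32, maxWarpsPerSm
+        // is 64. A 128-thread block allocates 4 warps, so with partitioned
+        // global caching each half SM fits 32 / 4 = 8 blocks, giving
+        // maxBlocks = 8 * 2 = 16; the non-partitioned path below computes
+        // 64 / 4 = 16 as well.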
+ // + else { + maxBlocks = maxWarpsPerSm / warpsAllocatedPerCTA; + } + } + + *limit = maxBlocks; + + return status; +} + +// Shared memory limit +// +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMSmemLimit( + int *limit, + cudaOccResult *result, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int blockSize, + size_t dynamicSmemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int allocationGranularity; + size_t userSmemPreference = 0; + size_t totalSmemUsagePerCTA; + size_t maxSmemUsagePerCTA; + size_t smemAllocatedPerCTA; + size_t staticSmemSize; + size_t sharedMemPerMultiprocessor; + size_t smemLimitPerCTA; + int maxBlocks; + int dynamicSmemSizeExceeded = 0; + int totalSmemSizeExceeded = 0; + (void)blockSize; // silence unused-variable warning + + status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Obtain the user preferred shared memory size. This setting is ignored if + // user requests more shared memory than preferred. + // + status = cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + staticSmemSize = attributes->sharedSizeBytes + properties->reservedSharedMemPerBlock; + totalSmemUsagePerCTA = staticSmemSize + dynamicSmemSize; + smemAllocatedPerCTA = __occRoundUp((int)totalSmemUsagePerCTA, (int)allocationGranularity); + + maxSmemUsagePerCTA = staticSmemSize + attributes->maxDynamicSharedSizeBytes; + + dynamicSmemSizeExceeded = 0; + totalSmemSizeExceeded = 0; + + // Obtain the user set maximum dynamic size if it exists + // If so, the current launch dynamic shared memory must not + // exceed the set limit + if (attributes->shmemLimitConfig != FUNC_SHMEM_LIMIT_DEFAULT && + dynamicSmemSize > attributes->maxDynamicSharedSizeBytes) { + dynamicSmemSizeExceeded = 1; + } + + status = cudaOccSMemPerBlock(&smemLimitPerCTA, properties, attributes->shmemLimitConfig, maxSmemUsagePerCTA); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + if (smemAllocatedPerCTA > smemLimitPerCTA) { + totalSmemSizeExceeded = 1; + } + + if (dynamicSmemSizeExceeded || totalSmemSizeExceeded) { + maxBlocks = 0; + } + else { + // User requested shared memory limit is used as long as it is greater + // than the total shared memory used per CTA, i.e. as long as at least + // one CTA can be launched. + if (userSmemPreference >= smemAllocatedPerCTA) { + sharedMemPerMultiprocessor = userSmemPreference; + } + else { + // On Volta+, user requested shared memory will limit occupancy + // if it's less than shared memory per CTA. Otherwise, the + // maximum shared memory limit is used. 
+ if (properties->computeMajor >= 7) { + sharedMemPerMultiprocessor = smemAllocatedPerCTA; + status = cudaOccAlignUpShmemSizeVoltaPlus(&sharedMemPerMultiprocessor, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + } + else { + sharedMemPerMultiprocessor = properties->sharedMemPerMultiprocessor; + } + } + + if (smemAllocatedPerCTA > 0) { + maxBlocks = (int)(sharedMemPerMultiprocessor / smemAllocatedPerCTA); + } + else { + maxBlocks = INT_MAX; + } + } + + result->allocatedSharedMemPerBlock = smemAllocatedPerCTA; + + *limit = maxBlocks; + + return status; +} + +static __OCC_INLINE +cudaOccError cudaOccMaxBlocksPerSMRegsLimit( + int *limit, + cudaOccPartitionedGCConfig *gcConfig, + cudaOccResult *result, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + int blockSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int allocationGranularity; + int warpsAllocatedPerCTA; + int regsAllocatedPerCTA; + int regsAssumedPerCTA; + int regsPerWarp; + int regsAllocatedPerWarp; + int numSubPartitions; + int numRegsPerSubPartition; + int numWarpsPerSubPartition; + int numWarpsPerSM; + int maxBlocks; + int maxRegsPerThread; + + status = cudaOccRegAllocationGranularity( + &allocationGranularity, + properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccRegAllocationMaxPerThread( + &maxRegsPerThread, + properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccSubPartitionsPerMultiprocessor(&numSubPartitions, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize); + + // GPUs of compute capability 2.x and higher allocate registers to warps + // + // Number of regs per warp is regs per thread x warp size, rounded up to + // register allocation granularity + // + regsPerWarp = attributes->numRegs * properties->warpSize; + regsAllocatedPerWarp = __occRoundUp(regsPerWarp, allocationGranularity); + regsAllocatedPerCTA = regsAllocatedPerWarp * warpsAllocatedPerCTA; + + // Hardware verifies if a launch fits the per-CTA register limit. For + // historical reasons, the verification logic assumes register + // allocations are made to all partitions simultaneously. Therefore, to + // simulate the hardware check, the warp allocation needs to be rounded + // up to the number of partitions. + // + regsAssumedPerCTA = regsAllocatedPerWarp * __occRoundUp(warpsAllocatedPerCTA, numSubPartitions); + + if (properties->regsPerBlock < regsAssumedPerCTA || // Hardware check + properties->regsPerBlock < regsAllocatedPerCTA || // Software check + attributes->numRegs > maxRegsPerThread) { // Per thread limit check + maxBlocks = 0; + } + else { + if (regsAllocatedPerWarp > 0) { + // Registers are allocated in each sub-partition. The max number + // of warps that can fit on an SM is equal to the max number of + // warps per sub-partition x number of sub-partitions. 
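+            // Worked illustration (values assumed for this example): with
+            // regsPerMultiprocessor = 65536, 4 sub-partitions, warpSize = 32
+            // and a kernel using 32 registers per thread, each warp is charged
+            // 32 * 32 = 1024 registers (already a multiple of the 256-register
+            // allocation granularity), each sub-partition gets
+            // 65536 / 4 = 16384 registers and thus holds 16384 / 1024 = 16
+            // warps, and the full SM holds 16 * 4 = 64 warps.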
+ // + numRegsPerSubPartition = properties->regsPerMultiprocessor / numSubPartitions; + numWarpsPerSubPartition = numRegsPerSubPartition / regsAllocatedPerWarp; + + maxBlocks = 0; + + if (*gcConfig != PARTITIONED_GC_OFF) { + int numSubPartitionsPerSmPartition; + int numWarpsPerSmPartition; + int maxBlocksPerSmPartition; + + // If partitioned global caching is on, then a CTA can only + // use a half SM, and thus a half of the registers available + // per SM + // + numSubPartitionsPerSmPartition = numSubPartitions / 2; + numWarpsPerSmPartition = numWarpsPerSubPartition * numSubPartitionsPerSmPartition; + maxBlocksPerSmPartition = numWarpsPerSmPartition / warpsAllocatedPerCTA; + maxBlocks = maxBlocksPerSmPartition * 2; + } + + // Try again if partitioned global caching is not enabled, or if + // the CTA cannot fit on the SM with caching on (maxBlocks == 0). In the latter + // case, the device will automatically turn off caching, except + // if the user forces enablement via PARTITIONED_GC_ON_STRICT to calculate + // occupancy and launch configuration. + // + if (maxBlocks == 0 && *gcConfig != PARTITIONED_GC_ON_STRICT) { + // In case *gcConfig was PARTITIONED_GC_ON flip it OFF since + // this is what it will be if we spread CTA across partitions. + // + *gcConfig = PARTITIONED_GC_OFF; + numWarpsPerSM = numWarpsPerSubPartition * numSubPartitions; + maxBlocks = numWarpsPerSM / warpsAllocatedPerCTA; + } + } + else { + maxBlocks = INT_MAX; + } + } + + + result->allocatedRegistersPerBlock = regsAllocatedPerCTA; + + *limit = maxBlocks; + + return status; +} + +// Barrier limit +// +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMBlockBarrierLimit( + int *limit, + int ctaLimitBlocks, + const cudaOccFuncAttributes *attributes) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int numBarriersAvailable = ctaLimitBlocks * 2; + int numBarriersUsed = attributes->numBlockBarriers; + int maxBlocks = INT_MAX; + + if (numBarriersUsed) { + maxBlocks = numBarriersAvailable / numBarriersUsed; + } + + *limit = maxBlocks; + + return status; +} + +/////////////////////////////////// +// API Implementations // +/////////////////////////////////// + +static __OCC_INLINE +cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor( + cudaOccResult *result, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int blockSize, + size_t dynamicSmemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int ctaLimitWarps = 0; + int ctaLimitBlocks = 0; + int ctaLimitSMem = 0; + int ctaLimitRegs = 0; + int ctaLimitBars = 0; + int ctaLimit = 0; + unsigned int limitingFactors = 0; + + cudaOccPartitionedGCConfig gcConfig = PARTITIONED_GC_OFF; + + if (!result || !properties || !attributes || !state || blockSize <= 0) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + /////////////////////////// + // Check user input + /////////////////////////// + + status = cudaOccInputCheck(properties, attributes, state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + /////////////////////////// + // Initialization + /////////////////////////// + + gcConfig = cudaOccPartitionedGCExpected(properties, attributes); + + /////////////////////////// + // Compute occupancy + /////////////////////////// + + // Limits due to registers/SM + // Also compute if partitioned global caching has to be turned off + // + status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegs, &gcConfig, result, properties, attributes, blockSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // 
SMs on GP100 (6.0) have 2 subpartitions, while those on GP10x have 4. + // As a result, an SM on GP100 may be able to run more CTAs than the one on GP10x. + // For forward compatibility within Pascal family, if a function cannot run on GP10x (maxBlock == 0), + // we do not let it run on any Pascal processor, even though it may be able to run on GP100. + // Therefore, we check the occupancy on GP10x when it can run on GP100 + // + if (properties->computeMajor == 6 && properties->computeMinor == 0 && ctaLimitRegs) { + cudaOccDeviceProp propertiesGP10x; + cudaOccPartitionedGCConfig gcConfigGP10x = gcConfig; + int ctaLimitRegsGP10x = 0; + + // Set up properties for GP10x + memcpy(&propertiesGP10x, properties, sizeof(propertiesGP10x)); + propertiesGP10x.computeMinor = 1; + + status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegsGP10x, &gcConfigGP10x, result, &propertiesGP10x, attributes, blockSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + if (ctaLimitRegsGP10x == 0) { + ctaLimitRegs = 0; + } + } + + // Limits due to warps/SM + // + status = cudaOccMaxBlocksPerSMWarpsLimit(&ctaLimitWarps, gcConfig, properties, attributes, blockSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Limits due to blocks/SM + // + status = cudaOccMaxBlocksPerMultiprocessor(&ctaLimitBlocks, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Limits due to shared memory/SM + // + status = cudaOccMaxBlocksPerSMSmemLimit(&ctaLimitSMem, result, properties, attributes, state, blockSize, dynamicSmemSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + /////////////////////////// + // Overall occupancy + /////////////////////////// + + // Overall limit is min() of limits due to above reasons + // + ctaLimit = __occMin(ctaLimitRegs, __occMin(ctaLimitSMem, __occMin(ctaLimitWarps, ctaLimitBlocks))); + + // Determine occupancy limiting factors + // + if (ctaLimit == ctaLimitWarps) { + limitingFactors |= OCC_LIMIT_WARPS; + } + if (ctaLimit == ctaLimitRegs) { + limitingFactors |= OCC_LIMIT_REGISTERS; + } + if (ctaLimit == ctaLimitSMem) { + limitingFactors |= OCC_LIMIT_SHARED_MEMORY; + } + if (ctaLimit == ctaLimitBlocks) { + limitingFactors |= OCC_LIMIT_BLOCKS; + } + + // For Hopper onwards compute the limits to occupancy based on block barrier count + // + if (properties->computeMajor >= 9 && attributes->numBlockBarriers > 0) { + // Limits due to barrier/SM + // + status = cudaOccMaxBlocksPerSMBlockBarrierLimit(&ctaLimitBars, ctaLimitBlocks, attributes); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Recompute overall limit based on barrier/SM + // + ctaLimit = __occMin(ctaLimitBars, ctaLimit); + + // Determine if this is occupancy limiting factor + // + if (ctaLimit == ctaLimitBars) { + limitingFactors |= OCC_LIMIT_BARRIERS; + } + } + else { + ctaLimitBars = INT_MAX; + } + + // Fill in the return values + // + result->limitingFactors = limitingFactors; + + result->blockLimitRegs = ctaLimitRegs; + result->blockLimitSharedMem = ctaLimitSMem; + result->blockLimitWarps = ctaLimitWarps; + result->blockLimitBlocks = ctaLimitBlocks; + result->blockLimitBarriers = ctaLimitBars; + result->partitionedGCConfig = gcConfig; + + // Final occupancy + result->activeBlocksPerMultiprocessor = ctaLimit; + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE +cudaOccError cudaOccAvailableDynamicSMemPerBlock( + size_t *bytesAvailable, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int 
numBlocks, + int blockSize) +{ + int allocationGranularity; + size_t smemLimitPerBlock; + size_t smemAvailableForDynamic; + size_t userSmemPreference = 0; + size_t sharedMemPerMultiprocessor; + cudaOccResult result; + cudaOccError status = CUDA_OCC_SUCCESS; + + if (numBlocks <= 0) + return CUDA_OCC_ERROR_INVALID_INPUT; + + // First compute occupancy of potential kernel launch. + // + status = cudaOccMaxActiveBlocksPerMultiprocessor(&result, properties, attributes, state, blockSize, 0); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + // Check if occupancy is achievable given user requested number of blocks. + // + if (result.activeBlocksPerMultiprocessor < numBlocks) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Return the per block shared memory limit based on function config. + // + status = cudaOccSMemPerBlock(&smemLimitPerBlock, properties, attributes->shmemLimitConfig, properties->sharedMemPerMultiprocessor); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // If there is only a single block needed per SM, then the user preference can be ignored and the fully SW + // limit is allowed to be used as shared memory otherwise if more than one block is needed, then the user + // preference sets the total limit of available shared memory. + // + cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state); + if (numBlocks == 1) { + sharedMemPerMultiprocessor = smemLimitPerBlock; + } + else { + if (!userSmemPreference) { + userSmemPreference = 1 ; + status = cudaOccAlignUpShmemSizeVoltaPlus(&userSmemPreference, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + } + sharedMemPerMultiprocessor = userSmemPreference; + } + + // Compute total shared memory available per SM + // + smemAvailableForDynamic = sharedMemPerMultiprocessor / numBlocks; + smemAvailableForDynamic = (smemAvailableForDynamic / allocationGranularity) * allocationGranularity; + + // Cap shared memory + // + if (smemAvailableForDynamic > smemLimitPerBlock) { + smemAvailableForDynamic = smemLimitPerBlock; + } + + // Now compute dynamic shared memory size + smemAvailableForDynamic = smemAvailableForDynamic - attributes->sharedSizeBytes; + + // Cap computed dynamic SM by user requested limit specified via cuFuncSetAttribute() + // + if (smemAvailableForDynamic > attributes->maxDynamicSharedSizeBytes) + smemAvailableForDynamic = attributes->maxDynamicSharedSizeBytes; + + *bytesAvailable = smemAvailableForDynamic; + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, + int *blockSize, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + size_t (*blockSizeToDynamicSMemSize)(int), + size_t dynamicSMemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + cudaOccResult result; + + // Limits + int occupancyLimit; + int granularity; + int blockSizeLimit; + + // Recorded maximum + int maxBlockSize = 0; + int numBlocks = 0; + int maxOccupancy = 0; + + // Temporary + int blockSizeToTryAligned; + int blockSizeToTry; + int blockSizeLimitAligned; + int occupancyInBlocks; + int occupancyInThreads; + + /////////////////////////// + // Check user input + /////////////////////////// + + if (!minGridSize || !blockSize || !properties || !attributes || !state) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + status = 
cudaOccInputCheck(properties, attributes, state);
+    if (status != CUDA_OCC_SUCCESS) {
+        return status;
+    }
+
+    /////////////////////////////////////////////////////////////////////////////////
+    // Try each block size, and pick the block size with maximum occupancy
+    /////////////////////////////////////////////////////////////////////////////////
+
+    occupancyLimit = properties->maxThreadsPerMultiprocessor;
+    granularity    = properties->warpSize;
+
+    blockSizeLimit        = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
+    blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
+
+    for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
+        blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
+
+        // Ignore dynamicSMemSize if the user provides a mapping
+        //
+        if (blockSizeToDynamicSMemSize) {
+            dynamicSMemSize = (*blockSizeToDynamicSMemSize)(blockSizeToTry);
+        }
+
+        status = cudaOccMaxActiveBlocksPerMultiprocessor(
+            &result,
+            properties,
+            attributes,
+            state,
+            blockSizeToTry,
+            dynamicSMemSize);
+
+        if (status != CUDA_OCC_SUCCESS) {
+            return status;
+        }
+
+        occupancyInBlocks  = result.activeBlocksPerMultiprocessor;
+        occupancyInThreads = blockSizeToTry * occupancyInBlocks;
+
+        if (occupancyInThreads > maxOccupancy) {
+            maxBlockSize = blockSizeToTry;
+            numBlocks    = occupancyInBlocks;
+            maxOccupancy = occupancyInThreads;
+        }
+
+        // Early out if we have reached the maximum
+        //
+        if (occupancyLimit == maxOccupancy) {
+            break;
+        }
+    }
+
+    ///////////////////////////
+    // Return best available
+    ///////////////////////////
+
+    // Suggested min grid size to achieve a full machine launch
+    //
+    *minGridSize = numBlocks * properties->numSms;
+    *blockSize   = maxBlockSize;
+
+    return status;
+}
+
+
+#if defined(__cplusplus)
+
+namespace {
+
+__OCC_INLINE
+cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
+    int                         *minGridSize,
+    int                         *blockSize,
+    const cudaOccDeviceProp     *properties,
+    const cudaOccFuncAttributes *attributes,
+    const cudaOccDeviceState    *state,
+    size_t                       dynamicSMemSize)
+{
+    return cudaOccMaxPotentialOccupancyBlockSize(
+        minGridSize,
+        blockSize,
+        properties,
+        attributes,
+        state,
+        NULL,
+        dynamicSMemSize);
+}
+
+template <typename UnaryFunction>
+__OCC_INLINE
+cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
+    int                         *minGridSize,
+    int                         *blockSize,
+    const cudaOccDeviceProp     *properties,
+    const cudaOccFuncAttributes *attributes,
+    const cudaOccDeviceState    *state,
+    UnaryFunction                blockSizeToDynamicSMemSize)
+{
+    cudaOccError  status = CUDA_OCC_SUCCESS;
+    cudaOccResult result;
+
+    // Limits
+    int occupancyLimit;
+    int granularity;
+    int blockSizeLimit;
+
+    // Recorded maximum
+    int maxBlockSize = 0;
+    int numBlocks    = 0;
+    int maxOccupancy = 0;
+
+    // Temporary
+    int blockSizeToTryAligned;
+    int blockSizeToTry;
+    int blockSizeLimitAligned;
+    int occupancyInBlocks;
+    int occupancyInThreads;
+    size_t dynamicSMemSize;
+
+    ///////////////////////////
+    // Check user input
+    ///////////////////////////
+
+    if (!minGridSize || !blockSize || !properties || !attributes || !state) {
+        return CUDA_OCC_ERROR_INVALID_INPUT;
+    }
+
+    status = cudaOccInputCheck(properties, attributes, state);
+    if (status != CUDA_OCC_SUCCESS) {
+        return status;
+    }
+
+    /////////////////////////////////////////////////////////////////////////////////
+    // Try each block size, and pick the block size with maximum occupancy
+    /////////////////////////////////////////////////////////////////////////////////
+
+    occupancyLimit =
properties->maxThreadsPerMultiprocessor; + granularity = properties->warpSize; + blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock); + blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity); + + for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) { + blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned); + + dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry); + + status = cudaOccMaxActiveBlocksPerMultiprocessor( + &result, + properties, + attributes, + state, + blockSizeToTry, + dynamicSMemSize); + + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + occupancyInBlocks = result.activeBlocksPerMultiprocessor; + + occupancyInThreads = blockSizeToTry * occupancyInBlocks; + + if (occupancyInThreads > maxOccupancy) { + maxBlockSize = blockSizeToTry; + numBlocks = occupancyInBlocks; + maxOccupancy = occupancyInThreads; + } + + // Early out if we have reached the maximum + // + if (occupancyLimit == maxOccupancy) { + break; + } + } + + /////////////////////////// + // Return best available + /////////////////////////// + + // Suggested min grid size to achieve a full machine launch + // + *minGridSize = numBlocks * properties->numSms; + *blockSize = maxBlockSize; + + return status; +} + +} // namespace anonymous + +#endif /*__cplusplus */ + +#undef __OCC_INLINE + +#endif /*__cuda_occupancy_h__*/ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h new file mode 100644 index 0000000000000000000000000000000000000000..46bc89e4499576f1ae58848cd8684ba3e32420cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h @@ -0,0 +1,224 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _CUDA_PIPELINE_H_
+# define _CUDA_PIPELINE_H_
+
+# include "cuda_pipeline_primitives.h"
+
+# if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
+#  error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
+         -std=c++11 compiler option.
+# endif
+
+# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+#  include "cuda_awbarrier.h"
+# endif
+
+// Integration with libcu++'s cuda::barrier.
+
+# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+#  if defined(_LIBCUDACXX_CUDA_ABI_VERSION)
+#   define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION
+#  else
+#   define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4
+#  endif
+
+#  define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y
+#  define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y)
+#  define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION)
+
+namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE {
+    struct __block_scope_barrier_base;
+}}
+
+# endif
+
+_CUDA_PIPELINE_BEGIN_NAMESPACE
+
+template<size_t N, typename T>
+_CUDA_PIPELINE_QUALIFIER
+auto segment(T* ptr) -> T(*)[N];
+
+class pipeline {
+public:
+    pipeline(const pipeline&) = delete;
+    pipeline(pipeline&&) = delete;
+    pipeline& operator=(const pipeline&) = delete;
+    pipeline& operator=(pipeline&&) = delete;
+
+    _CUDA_PIPELINE_QUALIFIER pipeline();
+    _CUDA_PIPELINE_QUALIFIER size_t commit();
+    _CUDA_PIPELINE_QUALIFIER void commit_and_wait();
+    _CUDA_PIPELINE_QUALIFIER void wait(size_t batch);
+    template<unsigned N>
+    _CUDA_PIPELINE_QUALIFIER void wait_prior();
+
+# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+    _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier);
+    _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier);
+# endif
+
+private:
+    size_t current_batch;
+};
+
+template<class T>
+_CUDA_PIPELINE_QUALIFIER
+void memcpy_async(T& dst, const T& src, pipeline& pipe);
+
+template<class T, size_t DstN, size_t SrcN>
+_CUDA_PIPELINE_QUALIFIER
+void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe);
+
+template<size_t N, typename T>
+_CUDA_PIPELINE_QUALIFIER
+auto segment(T* ptr) -> T(*)[N]
+{
+    return (T(*)[N])ptr;
+}
+
+_CUDA_PIPELINE_QUALIFIER
+pipeline::pipeline()
+    : current_batch(0)
+{
+}
+
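+/*
+ * Illustrative usage sketch (not part of the original header; the shared-memory
+ * buffer, its size of 128 elements, and the global pointer gmem are assumptions
+ * made for this example). Inside a kernel, a per-thread pipeline object batches
+ * asynchronous copies that are waited on before the data is used:
+ *
+ *     __shared__ int smem[128];
+ *     nvcuda::experimental::pipeline pipe;
+ *
+ *     memcpy_async(smem[threadIdx.x], gmem[threadIdx.x], pipe);
+ *     pipe.commit_and_wait();
+ *     __syncthreads();
+ *     // smem[threadIdx.x] now holds the copied value.
+ */
+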
+_CUDA_PIPELINE_QUALIFIER
+size_t pipeline::commit()
+{
+    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
+    return this->current_batch++;
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline::commit_and_wait()
+{
+    (void)pipeline::commit();
+    pipeline::wait_prior<0>();
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline::wait(size_t batch)
+{
+    const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0;
+
+    switch (prior) {
+    case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break;
+    case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break;
+    case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break;
+    case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break;
+    case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break;
+    case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break;
+    case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break;
+    case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break;
+    default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break;
+    }
+}
+
+template<unsigned N>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline::wait_prior()
+{
+    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<N>();
+}
+
+# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+_CUDA_PIPELINE_QUALIFIER
+void pipeline::arrive_on(awbarrier& barrier)
+{
+    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier);
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier)
+{
+    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast<uint64_t*>(&barrier));
+}
+# endif
+
+template<class T>
+_CUDA_PIPELINE_QUALIFIER
+void memcpy_async(T& dst, const T& src, pipeline& pipe)
+{
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&src) & (alignof(T) - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&dst) & (alignof(T) - 1)));
+
+    if (__is_trivially_copyable(T)) {
+        _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed(
+            reinterpret_cast(&dst), reinterpret_cast(&src));
+    } else {
+        dst = src;
+    }
+}
+
+template<class T, size_t DstN, size_t SrcN>
+_CUDA_PIPELINE_QUALIFIER
+void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe)
+{
+    constexpr size_t dst_size = sizeof(*dst);
+    constexpr size_t src_size = sizeof(*src);
+    static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size.");
+    static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size.");
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (dst_size - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (dst_size - 1)));
+
+    if (__is_trivially_copyable(T)) {
+        _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict(
+            reinterpret_cast(*dst), reinterpret_cast(*src));
+    } else {
+        for (size_t i = 0; i < DstN; ++i) {
+            (*dst)[i] = (i < SrcN) ? (*src)[i] : T();
+        }
+    }
+}
+
+_CUDA_PIPELINE_END_NAMESPACE
+
+#endif /* !_CUDA_PIPELINE_H_ */
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h
new file mode 100644
index 0000000000000000000000000000000000000000..01882b6b976347b9bfdf276c3d0adcec1d8a55fa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#ifndef _CUDA_PIPELINE_HELPERS_H_ +# define _CUDA_PIPELINE_HELPERS_H_ + +# define _CUDA_PIPELINE_NAMESPACE nvcuda::experimental +# define _CUDA_PIPELINE_BEGIN_NAMESPACE namespace nvcuda { namespace experimental { +# define _CUDA_PIPELINE_END_NAMESPACE } } + +# define _CUDA_PIPELINE_INTERNAL_NAMESPACE _CUDA_PIPELINE_NAMESPACE::__pipeline_internal +# define _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE _CUDA_PIPELINE_BEGIN_NAMESPACE namespace __pipeline_internal { +# define _CUDA_PIPELINE_END_INTERNAL_NAMESPACE } _CUDA_PIPELINE_END_NAMESPACE + +# if !defined(_CUDA_PIPELINE_QUALIFIER) +# define _CUDA_PIPELINE_QUALIFIER inline __device__ +# endif +# if !defined(_CUDA_PIPELINE_STATIC_QUALIFIER) +# define _CUDA_PIPELINE_STATIC_QUALIFIER static inline __device__ +# endif + +# if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +# define _CUDA_PIPELINE_ARCH_700_OR_LATER +# endif + +# if (__CUDA_ARCH__ >= 800) +# define _CUDA_PIPELINE_HAS_ASYNC_COPY 1 +# else +# define _CUDA_PIPELINE_HAS_ASYNC_COPY 0 +# endif + +# if !defined(_CUDA_PIPELINE_MAX_STAGES) +# define _CUDA_PIPELINE_MAX_STAGES 8 +# endif + +# if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900))) +# define _CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER +# endif + +# if !defined(_CUDA_PIPELINE_DEBUG) +# if defined(__CUDACC_DEBUG__) +# define _CUDA_PIPELINE_DEBUG 1 +# else +# define _CUDA_PIPELINE_DEBUG 0 +# endif +# endif + +# if defined(_CUDA_PIPELINE_DEBUG) && (_CUDA_PIPELINE_DEBUG == 1) && !defined(NDEBUG) +# if !defined(__CUDACC_RTC__) +# include +# endif +# define _CUDA_PIPELINE_ASSERT(x) assert((x)); +# define _CUDA_PIPELINE_ABORT() assert(0); +# else +# define _CUDA_PIPELINE_ASSERT(x) +# define _CUDA_PIPELINE_ABORT() __trap(); +# endif + +# if defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER) +# define _CUDA_PIPELINE_STATIC_ASSERT(c, m) static_assert(c, m) +# else +# define _CUDA_PIPELINE_STATIC_ASSERT(c, m) +# endif + +# if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__) +# define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "r" +# else +# define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "l" +# endif + +# if defined(__CUDACC_RTC__) +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef uint64_t uintptr_t; +# else +# include +# endif + +_CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE + +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(short) == 2, "Size mismatch for type 'short'"); +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int) == 4, "Size mismatch for type 'int'"); +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int2) == 8, "Size mismatch for type 'int2'"); +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int4) == 16, "Size mismatch for type 'int4'"); + +extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *); + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline_memcpy_sync(void* __restrict__ dst, const void* __restrict__ src) +{ + _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size."); + _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size"); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (CopySize - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (CopySize - 1))); + + char* const d = reinterpret_cast(dst); + const char* const s = reinterpret_cast(src); + + size_t copy_step_size; + if (SourceSize == 0) { + copy_step_size = CopySize; + } else if (SourceSize == 2 || SourceSize == 4 || SourceSize == 8 || SourceSize == 16) { + copy_step_size = SourceSize; + } else { + copy_step_size = 1; + 
} + + for (size_t i = 0; i < CopySize; i += copy_step_size) { + const bool copy_source = SourceSize && (i < SourceSize); + + switch (copy_step_size) { + case 1: + d[i] = copy_source ? s[i] : char(); + break; + case 2: + *reinterpret_cast(d + i) = copy_source ? *reinterpret_cast(s + i) : short(); + break; + case 4: + *reinterpret_cast(d + i) = copy_source ? *reinterpret_cast(s + i) : int(); + break; + case 8: + *reinterpret_cast(d + i) = copy_source ? *reinterpret_cast(s + i) : int2(); + break; + case 16: + *reinterpret_cast(d + i) = copy_source ? *reinterpret_cast(s + i) : int4(); + break; + } + } +} + +template +struct ImplementationChooser; + +template<> +struct ImplementationChooser { + template + struct CpAsyncChooser { + _CUDA_PIPELINE_STATIC_QUALIFIER + void cp_async(void* __restrict__ dst, const void* __restrict__ src) + { + asm volatile ("cp.async.ca.shared.global [%0], [%1], %2, %3;" + : + : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(CopySize), + "n"(SourceSize) + : "memory"); + } + }; + + template + struct CpAsyncChooser<16, SourceSize> { + _CUDA_PIPELINE_STATIC_QUALIFIER + void cp_async(void* __restrict__ dst, const void* __restrict__ src) + { + asm volatile ("cp.async.cg.shared.global [%0], [%1], %2, %3;" + : + : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(16), "n"(SourceSize) + : "memory"); + } + }; + + template + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src) + { + _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size."); + _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size"); + _CUDA_PIPELINE_ASSERT(__isShared(dst)); + _CUDA_PIPELINE_ASSERT(__isGlobal(src)); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (CopySize - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (CopySize - 1))); + + CpAsyncChooser::cp_async(dst, src); + } + + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_commit() + { + asm volatile ("cp.async.commit_group;"); + } + + template + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_wait_prior() + { + asm volatile ("cp.async.wait_group %0;" + : + : "n"(N < _CUDA_PIPELINE_MAX_STAGES ? 
N : _CUDA_PIPELINE_MAX_STAGES)); + } + + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_arrive_on(uint64_t* barrier) + { + _CUDA_PIPELINE_ASSERT(__isShared(barrier)); + + asm volatile ("cp.async.mbarrier.arrive.shared.b64 [%0];" + : + : "r"(__nvvm_get_smem_pointer(barrier))); + } +}; + +template<> +struct ImplementationChooser { + template + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src) + { + _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size."); + _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size"); + _CUDA_PIPELINE_ASSERT(__isShared(dst)); + _CUDA_PIPELINE_ASSERT(__isGlobal(src)); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (CopySize - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (CopySize - 1))); + + pipeline_memcpy_sync(dst, src); + } + + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_commit() + { + } + + template + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_wait_prior() + { + } + + _CUDA_PIPELINE_STATIC_QUALIFIER + void pipeline_arrive_on(uint64_t* barrier) + { + } +}; + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src) +{ + _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size."); + _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size"); + _CUDA_PIPELINE_ASSERT(__isShared(dst)); + _CUDA_PIPELINE_ASSERT(__isGlobal(src)); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (CopySize - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (CopySize - 1))); + + ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_memcpy_async(dst, src); +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline_commit() +{ + ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_commit(); +} + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline_wait_prior() +{ + ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_wait_prior(); +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline_arrive_on(uint64_t* barrier) +{ + ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_arrive_on(barrier); +} + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline_copy_strict(void* __restrict__ dst, const void* __restrict__ src) +{ + _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size."); + _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size."); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (CopySize - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (CopySize - 1))); + + if (__isGlobal(src) && __isShared(dst)) { + pipeline_memcpy_async(dst, src); + } else { + pipeline_memcpy_sync(dst, src); + } +} + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline_copy_relaxed(void* __restrict__ dst, const void* __restrict__ src) +{ + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (Align - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (Align - 1))); + + const char* s = reinterpret_cast(src); + char* d = reinterpret_cast(dst); + size_t remaining = CopySize; + + while (remaining) { + if ((Align >= 16) && (remaining >= 16)) { + pipeline_copy_strict<16, 16>(dst, src); + d += 16; + s += 16; + remaining -= 16; + } else if ((Align >= 8) && (remaining >= 8)) { + pipeline_copy_strict<8, 8>(dst, src); + d += 8; + s += 8; + remaining -= 8; + } else if 
((Align >= 4) && (remaining >= 4)) { + pipeline_copy_strict<4, 4>(dst, src); + d += 4; + s += 4; + remaining -= 4; + } else if ((Align >= 2) && (remaining >= 2)) { + *reinterpret_cast(d) = *reinterpret_cast(s); + d += 2; + s += 2; + remaining -= 2; + } else { + *d = *s; + d += 1; + s += 1; + remaining -= 1; + } + } +} + +_CUDA_PIPELINE_END_INTERNAL_NAMESPACE + +#endif /* !_CUDA_PIPELINE_HELPERS_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h new file mode 100644 index 0000000000000000000000000000000000000000..eaba0cfb5ac9184bec5e837d2ec2f9db11d873ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h @@ -0,0 +1,148 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
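The helpers above pick between the sm_80 cp.async path and the synchronous fallback entirely at compile time, through the bool-specialised ImplementationChooser. The pattern is plain C++; the host-only sketch below (CopyBackend and HAS_ASYNC_COPY are our stand-in names, not part of the header) shows the same dispatch shape in isolation.

#include <cstdio>
#include <cstring>

template <bool HasAsyncCopy>
struct CopyBackend;          // primary template intentionally left undefined

template <>
struct CopyBackend<true> {   // stands in for the cp.async-backed path
    static void copy(void* dst, const void* src, size_t n) {
        std::printf("async-capable path\n");
        std::memcpy(dst, src, n);
    }
};

template <>
struct CopyBackend<false> {  // stands in for the element-wise synchronous fallback
    static void copy(void* dst, const void* src, size_t n) {
        std::printf("synchronous fallback\n");
        std::memcpy(dst, src, n);
    }
};

#define HAS_ASYNC_COPY 0     // plays the role of _CUDA_PIPELINE_HAS_ASYNC_COPY

int main() {
    int a[4] = {1, 2, 3, 4}, b[4] = {};
    CopyBackend<HAS_ASYNC_COPY != 0>::copy(b, a, sizeof(a));  // resolved at compile time
    return 0;
}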
+ */ + +#ifndef _CUDA_PIPELINE_PRIMITIVES_H_ +# define _CUDA_PIPELINE_PRIMITIVES_H_ + +# include "cuda_pipeline_helpers.h" + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_memcpy_async(void* __restrict__ dst_shared, const void* __restrict__ src_global, size_t size_and_align, + size_t zfill = 0) +{ + _CUDA_PIPELINE_ASSERT(size_and_align == 4 || size_and_align == 8 || size_and_align == 16); + _CUDA_PIPELINE_ASSERT(zfill <= size_and_align); + _CUDA_PIPELINE_ASSERT(__isShared(dst_shared)); + _CUDA_PIPELINE_ASSERT(__isGlobal(src_global)); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst_shared) & (size_and_align - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src_global) & (size_and_align - 1))); + + switch (size_and_align) { + case 16: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 16>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 15>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 14>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 13>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 12>(dst_shared, src_global); return; + case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 11>(dst_shared, src_global); return; + case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 10>(dst_shared, src_global); return; + case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 9>(dst_shared, src_global); return; + case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 8>(dst_shared, src_global); return; + case 9: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 7>(dst_shared, src_global); return; + case 10: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 6>(dst_shared, src_global); return; + case 11: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 5>(dst_shared, src_global); return; + case 12: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 4>(dst_shared, src_global); return; + case 13: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 3>(dst_shared, src_global); return; + case 14: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 2>(dst_shared, src_global); return; + case 15: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 1>(dst_shared, src_global); return; + case 16: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + case 8: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 8>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 7>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 6>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 5>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 4>(dst_shared, src_global); return; + case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 3>(dst_shared, src_global); return; + case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 2>(dst_shared, src_global); return; + case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 1>(dst_shared, src_global); return; + case 8: 
_CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + case 4: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 4>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 3>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 2>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 1>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + default: + _CUDA_PIPELINE_ABORT(); + return; + } +} + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_commit() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit(); +} + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_wait_prior(size_t prior) +{ + switch (prior) { + case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); return; + case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); return; + case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); return; + case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); return; + case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); return; + case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); return; + case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); return; + case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); return; + default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); return; + } +} + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# include "cuda_awbarrier_primitives.h" + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_arrive_on(__mbarrier_t* barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(barrier); +} +# endif + +#endif /* !_CUDA_PIPELINE_PRIMITIVES_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h new file mode 100644 index 0000000000000000000000000000000000000000..782bff6caaf2c5991f7f14d2de45863cdb3a3fad --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h @@ -0,0 +1,2300 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
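The C-style primitives defined above (__pipeline_memcpy_async, __pipeline_commit, __pipeline_wait_prior) can be used directly, without the nvcuda::experimental::pipeline object. A minimal sketch, assuming a launch with 256 threads per block; the kernel and buffer names are illustrative.

#include <cuda_pipeline_primitives.h>

__global__ void scale_kernel(const float* __restrict__ in,
                             float* __restrict__ out, int n)
{
    __shared__ float tile[256];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i < n) {
        // 4-byte, 4-byte-aligned copy from global to shared; zfill defaults to 0.
        __pipeline_memcpy_async(&tile[threadIdx.x], &in[i], sizeof(float));
    }
    __pipeline_commit();          // close the current batch of async copies
    __pipeline_wait_prior(0);     // wait until every committed batch has landed
    __syncthreads();              // make the shared tile visible block-wide

    if (i < n) {
        out[i] = 2.0f * tile[threadIdx.x];
    }
}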
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_RUNTIME_H__) +#define __CUDA_RUNTIME_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__ +#endif + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#endif +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#elif defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable: 4820) +#endif +#endif + +#ifdef __QNX__ +#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) +typedef unsigned size_t; +#endif +#endif +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "crt/host_config.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "library_types.h" +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#include "cuda_runtime_api.h" +#include "driver_functions.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "crt/host_defines.h" +#ifdef __CUDACC_RTC__ +#include "target" +#endif /* defined(__CUDACC_RTC__) */ + + +#include "vector_functions.h" + +#if defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#include "nvrtc_device_runtime.h" +#include "crt/device_functions.h" +#include "crt/common_functions.h" +#include "device_launch_parameters.h" + +#else /* !__CUDACC_RTC__ */ +#define EXCLUDE_FROM_RTC +#include "crt/common_functions.h" +#include "crt/device_functions.h" +#include "device_launch_parameters.h" + +#if defined(__CUDACC_EXTENDED_LAMBDA__) +#include +#include +struct __device_builtin__ 
__nv_lambda_preheader_injection { }; +#endif /* defined(__CUDACC_EXTENDED_LAMBDA__) */ + +#undef EXCLUDE_FROM_RTC +#endif /* __CUDACC_RTC__ */ + +#endif /* __CUDACC__ */ + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +#if defined(__cplusplus) && !defined(__CUDACC_RTC__) + +#if __cplusplus >= 201103 +#include +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +/** + * \addtogroup CUDART_HIGHLEVEL + * @{ + */ + +/** + *\brief Launches a device function + * + * The function invokes kernel \p func on \p gridDim (\p gridDim.x × \p gridDim.y + * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x × + * \p blockDim.y × \p blockDim.z) threads. + * + * If the kernel has N parameters the \p args should point to array of N pointers. + * Each pointer, from args[0] to args[N - 1], point to the region + * of memory from which the actual parameter will be copied. + * + * \p sharedMem sets the amount of dynamic shared memory that will be available to + * each thread block. + * + * \p stream specifies a stream the invocation is associated to. + * + * \param func - Device function symbol + * \param gridDim - Grid dimentions + * \param blockDim - Block dimentions + * \param args - Arguments + * \param sharedMem - Shared memory (defaults to 0) + * \param stream - Stream identifier (defaults to NULL) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaLaunchKernel( + const T *func, + dim3 gridDim, + dim3 blockDim, + void **args, + size_t sharedMem = 0, + cudaStream_t stream = 0 +) +{ + return ::cudaLaunchKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream); +} + + +#if __cplusplus >= 201103 || defined(__DOXYGEN_ONLY__) +/** + * \brief Launches a CUDA function with launch-time configuration + * + * Invokes the kernel \p func on \p config->gridDim (\p config->gridDim.x + * × \p config->gridDim.y × \p config->gridDim.z) grid of blocks. + * Each block contains \p config->blockDim (\p config->blockDim.x × + * \p config->blockDim.y × \p config->blockDim.z) threads. + * + * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that + * will be available to each thread block. + * + * \p config->stream specifies a stream the invocation is associated to. 
+ * + * Configuration beyond grid and block dimensions, dynamic shared memory size, + * and stream can be provided with the following two fields of \p config: + * + * \p config->attrs is an array of \p config->numAttrs contiguous + * ::cudaLaunchAttribute elements. The value of this pointer is not considered + * if \p config->numAttrs is zero. However, in that case, it is recommended to + * set the pointer to NULL. + * \p config->numAttrs is the number of attributes populating the first + * \p config->numAttrs positions of the \p config->attrs array. + * + * The kernel arguments should be passed as arguments to this function via the + * \p args parameter pack. + * + * The C API version of this function, \p cudaLaunchKernelExC, is also available + * for pre-C++11 compilers and for use cases where the ability to pass kernel + * parameters via void* array is preferable. + * + * \param config - Launch configuration + * \param func - Kernel to launch + * \param args - Parameter pack of kernel parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args) "cudaLaunchKernelEx (C API)", + * ::cuLaunchKernelEx + */ +template +static __inline__ __host__ cudaError_t cudaLaunchKernelEx( + const cudaLaunchConfig_t *config, + void (*kernel)(ExpTypes...), + ActTypes &&... args +) +{ + return [&](ExpTypes... coercedArgs){ + void *pArgs[] = { &coercedArgs... }; + return ::cudaLaunchKernelExC(config, (const void *)kernel, pArgs); + }(std::forward(args)...); +} +#endif + +/** + *\brief Launches a device function + * + * The function invokes kernel \p func on \p gridDim (\p gridDim.x × \p gridDim.y + * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x × + * \p blockDim.y × \p blockDim.z) threads. + * + * The device on which this kernel is invoked must have a non-zero value for + * the device attribute ::cudaDevAttrCooperativeLaunch. + * + * The total number of blocks launched cannot exceed the maximum number of blocks per + * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or + * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + * as specified by the device attribute ::cudaDevAttrMultiProcessorCount. + * + * The kernel cannot make use of CUDA dynamic parallelism. + * + * If the kernel has N parameters the \p args should point to array of N pointers. + * Each pointer, from args[0] to args[N - 1], point to the region + * of memory from which the actual parameter will be copied. + * + * \p sharedMem sets the amount of dynamic shared memory that will be available to + * each thread block. + * + * \p stream specifies a stream the invocation is associated to. 
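Both launch wrappers documented above can be exercised from host code as in the sketch below. The axpy kernel and all sizes are our own illustrations; the cudaLaunchKernelEx branch assumes a CUDA 11.8+ toolkit (where cudaLaunchConfig_t and cudaLaunchKernelExC exist) and C++11.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void axpy(float a, const float* x, float* y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main()
{
    const int n = 1 << 20;
    float *x = nullptr, *y = nullptr;
    cudaMalloc(&x, n * sizeof(float));
    cudaMalloc(&y, n * sizeof(float));

    float a  = 2.0f;
    int   nn = n;
    void* args[] = { &a, &x, &y, &nn };          // one pointer per kernel parameter

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // Template overload above: takes the typed kernel pointer, no cast needed.
    cudaError_t err = cudaLaunchKernel(axpy, grid, block, args);
    std::printf("cudaLaunchKernel: %s\n", cudaGetErrorString(err));

#if CUDART_VERSION >= 11080 && __cplusplus >= 201103L
    // Variadic form: kernel parameters are passed directly, no void* array.
    cudaLaunchConfig_t cfg = {};
    cfg.gridDim  = grid;
    cfg.blockDim = block;
    err = cudaLaunchKernelEx(&cfg, axpy, a, (const float*)x, y, n);
    std::printf("cudaLaunchKernelEx: %s\n", cudaGetErrorString(err));
#endif

    cudaDeviceSynchronize();
    cudaFree(x);
    cudaFree(y);
    return 0;
}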
+ * + * \param func - Device function symbol + * \param gridDim - Grid dimentions + * \param blockDim - Block dimentions + * \param args - Arguments + * \param sharedMem - Shared memory (defaults to 0) + * \param stream - Stream identifier (defaults to NULL) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaLaunchCooperativeKernel( + const T *func, + dim3 gridDim, + dim3 blockDim, + void **args, + size_t sharedMem = 0, + cudaStream_t stream = 0 +) +{ + return ::cudaLaunchCooperativeKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream); +} + +/** + * \brief \hl Creates an event object with the specified flags + * + * Creates an event object with the specified flags. Valid flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that event should use blocking + * synchronization. A host thread that uses ::cudaEventSynchronize() to wait + * on an event created with this flag will block until the event actually + * completes. + * - ::cudaEventDisableTiming: Specifies that the created event does not need + * to record timing data. Events created with this flag specified and + * the ::cudaEventBlockingSync flag not specified will provide the best + * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery(). + * + * \param event - Newly created event + * \param flags - Flags for new event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent + */ +static __inline__ __host__ cudaError_t cudaEventCreate( + cudaEvent_t *event, + unsigned int flags +) +{ + return ::cudaEventCreateWithFlags(event, flags); +} + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * If there are any errors, diagnostic information may be returned in \p pErrorNode and + * \p pLogBuffer. This is the primary way to inspect instantiation errors. The output + * will be null terminated unless the diagnostics overflow + * the buffer. In this case, they will be truncated, and the last byte can be + * inspected to determine if truncation occurred. 
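The two-argument cudaEventCreate overload above simply forwards to cudaEventCreateWithFlags. A small host-side sketch (buffer size and names are arbitrary) that uses it to time a memset, with a blocking-sync stop event so the waiting host thread sleeps instead of spinning:

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    float* buf = nullptr;
    cudaMalloc(&buf, 1 << 24);

    cudaEvent_t start, stop;
    cudaEventCreate(&start, cudaEventDefault);        // overload documented above
    cudaEventCreate(&stop,  cudaEventBlockingSync);   // host sleeps while waiting

    cudaEventRecord(start);
    cudaMemset(buf, 0, 1 << 24);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);           // both events keep timing enabled
    std::printf("memset took %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(buf);
    return 0;
}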
+ * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param pErrorNode - In case of an instantiation error, this may be modified to + * indicate a node contributing to the error + * \param pLogBuffer - A character buffer to store diagnostic messages + * \param bufferSize - Size of the log buffer in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +static __inline__ __host__ cudaError_t cudaGraphInstantiate( + cudaGraphExec_t *pGraphExec, + cudaGraph_t graph, + cudaGraphNode_t *pErrorNode, + char *pLogBuffer, + size_t bufferSize +) +{ + (void)pErrorNode; + (void)pLogBuffer; + (void)bufferSize; + return ::cudaGraphInstantiate(pGraphExec, graph, 0); +} + +/** + * \brief \hl Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it + * can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between host + * and device. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaHostAllocDefault: This flag's value is defined to be 0. + * - ::cudaHostAllocPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space. + * The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). + * WC memory can be transferred across the PCI Express bus more quickly on some + * system configurations, but cannot be read efficiently by most CPUs. WC + * memory is a good option for buffers that will be written by the CPU and read + * by the device via mapped pinned memory or host->device transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * ::cudaSetDeviceFlags() must have been called with the ::cudaDeviceMapHost + * flag in order for the ::cudaHostAllocMapped flag to have any effect. + * + * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices + * that do not support mapped pinned memory. The failure is deferred to + * ::cudaHostGetDevicePointer() because the memory may be mapped into other + * CUDA contexts via the ::cudaHostAllocPortable flag. + * + * Memory allocated by this function must be freed with ::cudaFreeHost(). 
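The cudaGraphInstantiate overload above discards the legacy error-node/log-buffer arguments and forwards to the flags-based C API. Below is a minimal sketch that captures one async memset into a graph, instantiates it through this overload, and launches it; all names and sizes are illustrative.

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    float* d = nullptr;
    cudaMalloc(&d, 1024 * sizeof(float));

    cudaStream_t s;
    cudaStreamCreate(&s);

    // Capture a tiny piece of stream work into a graph.
    cudaGraph_t graph;
    cudaStreamBeginCapture(s, cudaStreamCaptureModeGlobal);
    cudaMemsetAsync(d, 0, 1024 * sizeof(float), s);
    cudaStreamEndCapture(s, &graph);

    // The 5-argument overload ignores errNode/log and forwards to the flags form.
    cudaGraphExec_t exec;
    cudaGraphNode_t errNode = nullptr;
    char log[256] = {};
    cudaError_t err = cudaGraphInstantiate(&exec, graph, &errNode, log, sizeof(log));
    std::printf("instantiate: %s\n", cudaGetErrorString(err));

    cudaGraphLaunch(exec, s);
    cudaStreamSynchronize(s);

    cudaGraphExecDestroy(exec);
    cudaGraphDestroy(graph);
    cudaStreamDestroy(s);
    cudaFree(d);
    return 0;
}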
+ * + * \param ptr - Device pointer to allocated memory + * \param size - Requested allocation size in bytes + * \param flags - Requested properties of allocated memory + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc + */ +static __inline__ __host__ cudaError_t cudaMallocHost( + void **ptr, + size_t size, + unsigned int flags +) +{ + return ::cudaHostAlloc(ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaHostAlloc( + T **ptr, + size_t size, + unsigned int flags +) +{ + return ::cudaHostAlloc((void**)(void*)ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaHostGetDevicePointer( + T **pDevice, + void *pHost, + unsigned int flags +) +{ + return ::cudaHostGetDevicePointer((void**)(void*)pDevice, pHost, flags); +} + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p size bytes of managed memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::cudaErrorNotSupported is returned. Support + * for managed memory can be queried using the device attribute + * ::cudaDevAttrManagedMemory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p size + * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. The pointer + * is valid on the CPU and on all GPUs in the system that support managed memory. + * All accesses to this pointer must obey the Unified Memory programming model. + * + * \p flags specifies the default stream association for this allocation. + * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The + * default value for \p flags is ::cudaMemAttachGlobal. + * If ::cudaMemAttachGlobal is specified, then this memory is accessible from + * any stream on any device. If ::cudaMemAttachHost is specified, then the + * allocation should not be accessed from devices that have a zero value for the + * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to + * ::cudaStreamAttachMemAsync will be required to enable access on such devices. + * + * If the association is later changed via ::cudaStreamAttachMemAsync to + * a single stream, the default association, as specifed during ::cudaMallocManaged, + * is restored when that stream is destroyed. For __managed__ variables, the + * default association is always ::cudaMemAttachGlobal. Note that destroying a + * stream is an asynchronous operation, and as a result, the change to default + * association won't happen until all work in the stream has completed. + * + * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree. + * + * Device memory oversubscription is possible for GPUs that have a non-zero value for the + * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on + * such GPUs may be evicted from device memory to host memory at any time by the Unified + * Memory driver in order to make room for other allocations. + * + * In a multi-GPU system where all GPUs have a non-zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this + * API returns and instead may be populated on access. 
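A short host-side sketch of the flagged cudaMallocHost path documented above: allocate a pinned staging buffer, use it for a genuinely asynchronous host-to-device copy, and release it with cudaFreeHost. Sizes and names are arbitrary.

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const size_t bytes = 1 << 20;

    // Page-locked host buffer via the flagged overload (forwards to cudaHostAlloc).
    float* h = nullptr;
    cudaMallocHost(&h, bytes, cudaHostAllocDefault);

    float* d = nullptr;
    cudaMalloc(&d, bytes);

    for (size_t i = 0; i < bytes / sizeof(float); ++i) h[i] = 1.0f;

    // Pinned memory allows the H2D copy to overlap with other host work.
    cudaStream_t s;
    cudaStreamCreate(&s);
    cudaMemcpyAsync(d, h, bytes, cudaMemcpyHostToDevice, s);
    cudaStreamSynchronize(s);

    cudaStreamDestroy(s);
    cudaFree(d);
    cudaFreeHost(h);      // pinned allocations must be freed with cudaFreeHost
    std::printf("done\n");
    return 0;
}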
In such systems, managed memory can + * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to + * maintain data locality and prevent excessive page faults to the extent possible. The application + * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application + * can also explicitly migrate memory to a desired processor's memory via + * ::cudaMemPrefetchAsync. + * + * In a multi-GPU system where all of the GPUs have a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support + * with each other, the physical storage for managed memory is created on the GPU which is active + * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced + * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate + * memory among such GPUs. + * + * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and + * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess + * is zero for at least one of those GPUs, the location chosen for physical storage of managed + * memory is system-dependent. + * - On Linux, the location chosen will be device memory as long as the current set of active + * contexts are on devices that either have peer-to-peer support with each other or have a + * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. + * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero + * value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all devices used in + * that process that support managed memory have to be peer-to-peer compatible + * with each other. The error ::cudaErrorInvalidDevice will be returned if a device + * that supports managed memory is used and it is not peer-to-peer compatible with + * any of the other managed memory supporting devices that were previously used in + * that process, even if ::cudaDeviceReset has been called on those devices. These + * environment variables are described in the CUDA programming guide under the + * "CUDA environment variables" section. + * - On ARM, managed memory is not available on discrete gpu with Drive PX-2. 
+ * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal) + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync + */ +template +static __inline__ __host__ cudaError_t cudaMallocManaged( + T **devPtr, + size_t size, + unsigned int flags = cudaMemAttachGlobal +) +{ + return ::cudaMallocManaged((void**)(void*)devPtr, size, flags); +} + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p stream to specify stream association of + * \p length bytes of memory starting from \p devPtr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p devPtr must point to an one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cudaMallocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::cudaDevAttrPageableMemoryAccess. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. + * + * For pageable allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle. + * The default value for \p flags is ::cudaMemAttachSingle + * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with + * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess, + * the program makes a guarantee that it will only access the memory on the device + * from \p stream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p stream + * have completed, regardless of whether other streams are active. In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. 
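The cudaMallocManaged template above is used as in the sketch below: one pointer, touched first on the host, then by a kernel, then read back on the host after synchronization. The kernel and sizes are illustrative; on devices without concurrent managed access the host must not touch the allocation while a kernel that may access it is in flight.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void increment(int* data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1;
}

int main()
{
    const int n = 1 << 20;

    // Single pointer valid on host and device; migration handled by Unified Memory.
    int* data = nullptr;
    cudaMallocManaged(&data, n * sizeof(int));    // flags default to cudaMemAttachGlobal

    for (int i = 0; i < n; ++i) data[i] = i;      // touch on the host first

    increment<<<(n + 255) / 256, 256>>>(data, n);
    cudaDeviceSynchronize();                      // required before the host reads again

    std::printf("data[0] = %d, data[n-1] = %d\n", data[0], data[n - 1]);
    cudaFree(data);
    return 0;
}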
+ * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p stream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cudaMallocManaged. For __managed__ variables, the default + * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. + * + * \param stream - Stream in which to enqueue the attach operation + * \param devPtr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * memory) + * \param length - Length of memory (defaults to zero) + * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle) + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged + */ +template +static __inline__ __host__ cudaError_t cudaStreamAttachMemAsync( + cudaStream_t stream, + T *devPtr, + size_t length = 0, + unsigned int flags = cudaMemAttachSingle +) +{ + return ::cudaStreamAttachMemAsync(stream, (void*)devPtr, length, flags); +} + +template +static __inline__ __host__ cudaError_t cudaMalloc( + T **devPtr, + size_t size +) +{ + return ::cudaMalloc((void**)(void*)devPtr, size); +} + +template +static __inline__ __host__ cudaError_t cudaMallocHost( + T **ptr, + size_t size, + unsigned int flags = 0 +) +{ + return cudaMallocHost((void**)(void*)ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaMallocPitch( + T **devPtr, + size_t *pitch, + size_t width, + size_t height +) +{ + return ::cudaMallocPitch((void**)(void*)devPtr, pitch, width, height); +} + +/** + * \brief Allocate from a pool + * + * This is an alternate spelling for cudaMallocFromPoolAsync + * made available through operator overloading. 
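cudaStreamAttachMemAsync, as wrapped above, matters most on devices where cudaDevAttrConcurrentManagedAccess is zero: attaching a managed allocation to a single non-NULL stream lets the CPU touch it whenever that stream is idle. A minimal sketch under those assumptions; names and sizes are illustrative.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float* v, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= 2.0f;
}

int main()
{
    const int n = 1 << 16;

    float* v = nullptr;
    cudaMallocManaged(&v, n * sizeof(float));

    cudaStream_t s;
    cudaStreamCreate(&s);

    // Restrict the allocation to work issued in stream s (length 0 = whole allocation).
    cudaStreamAttachMemAsync(s, v, 0, cudaMemAttachSingle);
    cudaStreamSynchronize(s);                     // the attach takes effect in stream order

    for (int i = 0; i < n; ++i) v[i] = 1.0f;      // CPU may touch it while s is idle

    scale<<<(n + 255) / 256, 256, 0, s>>>(v, n);
    cudaStreamSynchronize(s);                     // after this the CPU may read v again

    std::printf("v[0] = %f\n", v[0]);

    cudaStreamDestroy(s);
    cudaFree(v);
    return 0;
}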
+ * + * \sa ::cudaMallocFromPoolAsync, + * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaStream_t hStream) "cudaMallocAsync (C API)" + */ +static __inline__ __host__ cudaError_t cudaMallocAsync( + void **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync(ptr, size, memPool, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocAsync( + T **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocAsync( + T **ptr, + size_t size, + cudaStream_t stream +) +{ + return ::cudaMallocAsync((void**)(void*)ptr, size, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocFromPoolAsync( + T **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream); +} + +#if defined(__CUDACC__) + +/** + * \brief \hl Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice. + * + * \param symbol - Device symbol reference + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyToSymbol( + const T &symbol, + const void *src, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyHostToDevice +) +{ + return ::cudaMemcpyToSymbol((const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief \hl Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice. + * + * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy + * may overlap with operations in other streams. 
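The cudaMemcpyToSymbol template above takes the __constant__ or __device__ variable itself, rather than a string name or a cast to const void*. A minimal sketch with an illustrative __constant__ array of coefficients:

#include <cuda_runtime.h>
#include <cstdio>

__constant__ float coeffs[4];          // symbol in constant memory

__global__ void apply(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = coeffs[0] * in[i] + coeffs[1];
}

int main()
{
    const float host_coeffs[4] = { 2.0f, 0.5f, 0.0f, 0.0f };

    // Template overload above: the symbol is passed by reference, no cast needed.
    cudaMemcpyToSymbol(coeffs, host_coeffs, sizeof(host_coeffs));

    const int n = 1024;
    float *in = nullptr, *out = nullptr;
    cudaMalloc(&in,  n * sizeof(float));
    cudaMalloc(&out, n * sizeof(float));
    cudaMemset(in, 0, n * sizeof(float));

    apply<<<(n + 255) / 256, 256>>>(in, out, n);
    cudaDeviceSynchronize();

    cudaFree(in);
    cudaFree(out);
    std::printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));
    return 0;
}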
+ * + * \param symbol - Device symbol reference + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyToSymbolAsync( + const T &symbol, + const void *src, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyHostToDevice, + cudaStream_t stream = 0 +) +{ + return ::cudaMemcpyToSymbolAsync((const void*)&symbol, src, count, offset, kind, stream); +} + +/** + * \brief \hl Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice. + * + * \param dst - Destination memory address + * \param symbol - Device symbol reference + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyFromSymbol( + void *dst, + const T &symbol, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost +) +{ + return ::cudaMemcpyFromSymbol(dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief \hl Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that resides in + * global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice. + * + * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap + * with operations in other streams. 
+ * + * \param dst - Destination memory address + * \param symbol - Device symbol reference + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyFromSymbolAsync( + void *dst, + const T &symbol, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost, + cudaStream_t stream = 0 +) +{ + return ::cudaMemcpyFromSymbolAsync(dst, (const void*)&symbol, count, offset, kind, stream); +} + +/** + * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
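And the matching read-back direction: the cudaMemcpyFromSymbolAsync template above can drain a __device__ symbol in stream order, with the host value becoming valid once the stream has been synchronized. The counter kernel below is purely illustrative.

#include <cuda_runtime.h>
#include <cstdio>

__device__ unsigned long long hits;   // device-global counter symbol

__global__ void count_even(const int* v, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && (v[i] % 2 == 0)) atomicAdd(&hits, 1ULL);
}

int main()
{
    const int n = 1 << 16;
    int* v = nullptr;
    cudaMalloc(&v, n * sizeof(int));
    cudaMemset(v, 0, n * sizeof(int));                 // all zeros, so every element is even

    const unsigned long long zero = 0;
    cudaMemcpyToSymbol(hits, &zero, sizeof(zero));     // reset the counter

    cudaStream_t s;
    cudaStreamCreate(&s);
    count_even<<<(n + 255) / 256, 256, 0, s>>>(v, n);

    // Asynchronous read-back of the symbol in the same stream; the host value
    // is valid only after the stream has been synchronized.
    unsigned long long result = 0;
    cudaMemcpyFromSymbolAsync(&result, hits, sizeof(result), 0, cudaMemcpyDeviceToHost, s);
    cudaStreamSynchronize(s);

    std::printf("hits = %llu (expected %d)\n", result, n);

    cudaStreamDestroy(s);
    cudaFree(v);
    return 0;
}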
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +template +static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeToSymbol( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphAddMemcpyNodeToSymbol(pGraphNode, graph, pDependencies, numDependencies, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
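+ *
+ * A minimal sketch; the \p devStats symbol and host buffer are hypothetical:
+ * \code
+ * __device__ int devStats[64];
+ *
+ * cudaGraph_t graph;
+ * cudaGraphCreate(&graph, 0);
+ *
+ * int hostStats[64];
+ * cudaGraphNode_t copyNode;
+ * // Root node (no dependencies) that reads devStats back into hostStats
+ * // each time the instantiated graph is launched.
+ * cudaGraphAddMemcpyNodeFromSymbol(&copyNode, graph, NULL, 0,
+ *                                  hostStats, devStats, sizeof(hostStats),
+ *                                  0, cudaMemcpyDeviceToHost);
+ * \endcode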
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +template +static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeFromSymbol( + cudaGraphNode_t* pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t* pDependencies, + size_t numDependencies, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphAddMemcpyNodeFromSymbol(pGraphNode, graph, pDependencies, numDependencies, dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief Sets a memcpy node's parameters to copy to a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsToSymbol( + cudaGraphNode_t node, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphMemcpyNodeSetParamsToSymbol(node, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Sets a memcpy node's parameters to copy from a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. 
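+ *
+ * A minimal sketch, assuming \p node is an existing memcpy node and
+ * \p devStats is a hypothetical device symbol:
+ * \code
+ * __device__ double devStats[32];
+ *
+ * double hostStats[32];
+ * // Re-point the node so it copies devStats into hostStats from now on.
+ * cudaGraphMemcpyNodeSetParamsFromSymbol(node, hostStats, devStats,
+ *                                        sizeof(hostStats), 0,
+ *                                        cudaMemcpyDeviceToHost);
+ * \endcode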
+ * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsFromSymbol( + cudaGraphNode_t node, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphMemcpyNodeSetParamsFromSymbol(node, dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p src and \p symbol must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
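+ *
+ * A minimal sketch, assuming \p graphExec was instantiated from a graph
+ * containing the memcpy node \p node, and \p devCoeffs is a hypothetical
+ * symbol:
+ * \code
+ * __constant__ float devCoeffs[16];
+ *
+ * float newCoeffs[16] = {0};
+ * // Future launches of graphExec upload newCoeffs instead of the source
+ * // buffer recorded when the graph was instantiated.
+ * cudaGraphExecMemcpyNodeSetParamsToSymbol(graphExec, node,
+ *                                          devCoeffs, newCoeffs,
+ *                                          sizeof(newCoeffs), 0,
+ *                                          cudaMemcpyHostToDevice);
+ * \endcode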
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphInstantiate, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsToSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphExecMemcpyNodeSetParamsToSymbol(hGraphExec, node, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p symbol and \p dst must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
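+ *
+ * A minimal sketch, assuming \p graphExec was instantiated from a graph
+ * containing the memcpy node \p node, and \p devStats is a hypothetical
+ * symbol:
+ * \code
+ * __device__ int devStats[64];
+ *
+ * int otherHostBuffer[64];
+ * // Redirect the node's destination for future launches of graphExec.
+ * cudaGraphExecMemcpyNodeSetParamsFromSymbol(graphExec, node,
+ *                                            otherHostBuffer, devStats,
+ *                                            sizeof(otherHostBuffer), 0,
+ *                                            cudaMemcpyDeviceToHost);
+ * \endcode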
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphInstantiate, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsFromSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphExecMemcpyNodeSetParamsFromSymbol(hGraphExec, node, dst, (const void*)&symbol, count, offset, kind); +} + +// convenience function to avoid source breakage in c++ code +static __inline__ __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out) +{ + cudaGraphExecUpdateResultInfo resultInfo; + cudaError_t status = cudaGraphExecUpdate(hGraphExec, hGraph, &resultInfo); + if (hErrorNode_out) { + *hErrorNode_out = resultInfo.errorNode; + } + if (updateResult_out) { + *updateResult_out = resultInfo.result; + } + return status; +} + +#if __cplusplus >= 201103 + +/** + * \brief Creates a user object by wrapping a C++ object + * + * TODO detail + * + * \param object_out - Location to return the user object handle + * \param objectToWrap - This becomes the \ptr argument to ::cudaUserObjectCreate. A + * lambda will be passed for the \p destroy argument, which calls + * delete on this object pointer. + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. + * \param flags - Currently it is required to pass cudaUserObjectNoDestructorSync, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + */ +template +static __inline__ __host__ cudaError_t cudaUserObjectCreate( + cudaUserObject_t *object_out, + T *objectToWrap, + unsigned int initialRefcount, + unsigned int flags) +{ + return ::cudaUserObjectCreate( + object_out, + objectToWrap, + [](void *vpObj) { delete reinterpret_cast(vpObj); }, + initialRefcount, + flags); +} + +template +static __inline__ __host__ cudaError_t cudaUserObjectCreate( + cudaUserObject_t *object_out, + T *objectToWrap, + unsigned int initialRefcount, + cudaUserObjectFlags flags) +{ + return cudaUserObjectCreate(object_out, objectToWrap, initialRefcount, (unsigned int)flags); +} + +#endif + +/** + * \brief \hl Finds the address associated with a CUDA symbol + * + * Returns in \p *devPtr the address of symbol \p symbol on the device. 
+ * \p symbol can either be a variable that resides in global or constant memory space. + * If \p symbol cannot be found, or if \p symbol is not declared + * in the global or constant memory space, \p *devPtr is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param devPtr - Return device pointer associated with symbol + * \param symbol - Device symbol reference + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)", + * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetSymbolAddress( + void **devPtr, + const T &symbol +) +{ + return ::cudaGetSymbolAddress(devPtr, (const void*)&symbol); +} + +/** + * \brief \hl Finds the size of the object associated with a CUDA symbol + * + * Returns in \p *size the size of symbol \p symbol. \p symbol must be a + * variable that resides in global or constant memory space. + * If \p symbol cannot be found, or if \p symbol is not declared + * in global or constant memory space, \p *size is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param size - Size of object associated with symbol + * \param symbol - Device symbol reference + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)", + * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetSymbolSize( + size_t *size, + const T &symbol +) +{ + return ::cudaGetSymbolSize(size, (const void*)&symbol); +} + +/** + * \brief \hl Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache configuration + * for the function specified via \p func. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute \p func. + * + * \p func must be a pointer to a function that executes on the device. + * The parameter specified by \p func must be declared as a \p __global__ + * function. If the specified function does not exist, + * then ::cudaErrorInvalidDeviceFunction is returned. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
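+ *
+ * A minimal sketch (the kernel name is hypothetical), using one of the
+ * configurations listed below:
+ * \code
+ * __global__ void myKernel(float *data);
+ *
+ * // Ask the runtime to favor shared memory over L1 for this kernel.
+ * cudaFuncSetCacheConfig(myKernel, cudaFuncCachePreferShared);
+ * \endcode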
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param func - device function pointer + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost, + * ::cudaThreadGetCacheConfig, + * ::cudaThreadSetCacheConfig + */ +template +static __inline__ __host__ cudaError_t cudaFuncSetCacheConfig( + T *func, + enum cudaFuncCache cacheConfig +) +{ + return ::cudaFuncSetCacheConfig((const void*)func, cacheConfig); +} + +template +static __inline__ __host__ cudaError_t cudaFuncSetSharedMemConfig( + T *func, + enum cudaSharedMemConfig config +) +{ + return ::cudaFuncSetSharedMemConfig((const void*)func, config); +} + +#endif // __CUDACC__ + +/** + * \brief Returns occupancy for a device function + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calulated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor( + int *numBlocks, + T func, + int blockSize, + size_t dynamicSMemSize) +{ + return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, cudaOccupancyDefault); +} + +/** + * \brief Returns occupancy for a device function with the specified flags + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * The \p flags parameter controls how special cases are handled. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * + * - ::cudaOccupancyDisableCachingOverride: suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. 
+ * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calulated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + int *numBlocks, + T func, + int blockSize, + size_t dynamicSMemSize, + unsigned int flags) +{ + return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, flags); +} + +/** + * Helper functor for cudaOccupancyMaxPotentialBlockSize + */ +class __cudaOccupancyB2DHelper { + size_t n; +public: + inline __host__ CUDART_DEVICE __cudaOccupancyB2DHelper(size_t n_) : n(n_) {} + inline __host__ CUDART_DEVICE size_t operator()(int) + { + return n; + } +}; + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * The \p flags parameter controls how special cases are handled. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * + * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. + * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. 
+ * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ + +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags( + int *minGridSize, + int *blockSize, + T func, + UnaryFunction blockSizeToDynamicSMemSize, + int blockSizeLimit = 0, + unsigned int flags = 0) +{ + cudaError_t status; + + // Device and function properties + int device; + struct cudaFuncAttributes attr; + + // Limits + int maxThreadsPerMultiProcessor; + int warpSize; + int devMaxThreadsPerBlock; + int multiProcessorCount; + int funcMaxThreadsPerBlock; + int occupancyLimit; + int granularity; + + // Recorded maximum + int maxBlockSize = 0; + int numBlocks = 0; + int maxOccupancy = 0; + + // Temporary + int blockSizeToTryAligned; + int blockSizeToTry; + int blockSizeLimitAligned; + int occupancyInBlocks; + int occupancyInThreads; + size_t dynamicSMemSize; + + /////////////////////////// + // Check user input + /////////////////////////// + + if (!minGridSize || !blockSize || !func) { + return cudaErrorInvalidValue; + } + + ////////////////////////////////////////////// + // Obtain device and function properties + ////////////////////////////////////////////// + + status = ::cudaGetDevice(&device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &maxThreadsPerMultiProcessor, + cudaDevAttrMaxThreadsPerMultiProcessor, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &warpSize, + cudaDevAttrWarpSize, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &devMaxThreadsPerBlock, + cudaDevAttrMaxThreadsPerBlock, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &multiProcessorCount, + cudaDevAttrMultiProcessorCount, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaFuncGetAttributes(&attr, func); + if (status != cudaSuccess) { + return status; + } + + funcMaxThreadsPerBlock = attr.maxThreadsPerBlock; + + ///////////////////////////////////////////////////////////////////////////////// + // Try each block size, and pick the block size with maximum occupancy + ///////////////////////////////////////////////////////////////////////////////// + + occupancyLimit = maxThreadsPerMultiProcessor; + granularity = warpSize; + + if (blockSizeLimit == 0) { + blockSizeLimit = devMaxThreadsPerBlock; + } + + if (devMaxThreadsPerBlock < blockSizeLimit) { + blockSizeLimit = devMaxThreadsPerBlock; + } + + if (funcMaxThreadsPerBlock < blockSizeLimit) { + blockSizeLimit = funcMaxThreadsPerBlock; + } + + blockSizeLimitAligned = ((blockSizeLimit + (granularity - 1)) / granularity) * granularity; + + for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) { + // This is needed for the first iteration, because + // blockSizeLimitAligned could be greater than 
blockSizeLimit + // + if (blockSizeLimit < blockSizeToTryAligned) { + blockSizeToTry = blockSizeLimit; + } else { + blockSizeToTry = blockSizeToTryAligned; + } + + dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry); + + status = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + &occupancyInBlocks, + func, + blockSizeToTry, + dynamicSMemSize, + flags); + + if (status != cudaSuccess) { + return status; + } + + occupancyInThreads = blockSizeToTry * occupancyInBlocks; + + if (occupancyInThreads > maxOccupancy) { + maxBlockSize = blockSizeToTry; + numBlocks = occupancyInBlocks; + maxOccupancy = occupancyInThreads; + } + + // Early out if we have reached the maximum + // + if (occupancyLimit == maxOccupancy) { + break; + } + } + + /////////////////////////// + // Return best available + /////////////////////////// + + // Suggested min grid size to achieve a full machine launch + // + *minGridSize = numBlocks * multiProcessorCount; + *blockSize = maxBlockSize; + + return status; +} + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ + +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem( + int *minGridSize, + int *blockSize, + T func, + UnaryFunction blockSizeToDynamicSMemSize, + int blockSizeLimit = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, blockSizeLimit, cudaOccupancyDefault); +} + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the + * amount of per-block dynamic shared memory changes with different + * block sizes. 
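+ *
+ * A typical usage sketch; the kernel name, problem size, and device buffer
+ * are hypothetical:
+ * \code
+ * __global__ void myKernel(int *data, int n);
+ *
+ * int n = 1 << 20;
+ * int *devData = NULL;
+ * cudaMalloc(&devData, n * sizeof(int));
+ *
+ * int minGridSize = 0, blockSize = 0;
+ * cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, myKernel);
+ *
+ * // Round the grid size up so every element is covered.
+ * int gridSize = (n + blockSize - 1) / blockSize;
+ * myKernel<<<gridSize, blockSize>>>(devData, n);
+ * \endcode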
+ * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSize( + int *minGridSize, + int *blockSize, + T func, + size_t dynamicSMemSize = 0, + int blockSizeLimit = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, cudaOccupancyDefault); +} + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM. + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the block + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyAvailableDynamicSMemPerBlock( + size_t *dynamicSmemSize, + T func, + int numBlocks, + int blockSize) +{ + return ::cudaOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, (const void*)func, numBlocks, blockSize); +} + +/** + * \brief Returns grid and block size that achived maximum potential occupancy for a device function with the specified flags + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * The \p flags parameter controls how special cases are handle. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxPotentialBlockSize + * + * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. 
+ * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the + * amount of per-block dynamic shared memory changes with different + * block sizes. + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags( + int *minGridSize, + int *blockSize, + T func, + size_t dynamicSMemSize = 0, + int blockSizeLimit = 0, + unsigned int flags = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, flags); +} + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum cluster size in \p *clusterSize. + * + * The cluster dimensions in \p config are ignored. If func has a required + * cluster size set (see ::cudaFuncGetAttributes),\p *clusterSize will reflect + * the required cluster size. + * + * By default this function will always return a value that's portable on + * future hardware. A higher value may be returned if the kernel function + * allows non-portable cluster sizes. + * + * This function will respect the compile time launch bounds. + * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxPotentialClusterSize( + int *clusterSize, + T *func, + const cudaLaunchConfig_t *config) +{ + return ::cudaOccupancyMaxPotentialClusterSize(clusterSize, (const void*)func, config); +} + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. + * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes), the cluster size from config must either be + * unspecified or match the required size. 
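+ *
+ * For a kernel without a required cluster size, a minimal sketch might look
+ * like this (the kernel name and launch dimensions are hypothetical):
+ * \code
+ * __global__ void myClusterKernel(float *data);
+ *
+ * cudaLaunchConfig_t config = {0};
+ * config.gridDim  = dim3(1024, 1, 1);
+ * config.blockDim = dim3(128, 1, 1);
+ *
+ * // Request a cluster of 2 thread blocks via a launch attribute.
+ * cudaLaunchAttribute attrs[1];
+ * attrs[0].id = cudaLaunchAttributeClusterDimension;
+ * attrs[0].val.clusterDim.x = 2;
+ * attrs[0].val.clusterDim.y = 1;
+ * attrs[0].val.clusterDim.z = 1;
+ * config.attrs = attrs;
+ * config.numAttrs = 1;
+ *
+ * int numClusters = 0;
+ * cudaOccupancyMaxActiveClusters(&numClusters, myClusterKernel, &config);
+ * \endcode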
+ * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidClusterSize, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveClusters( + int *numClusters, + T *func, + const cudaLaunchConfig_t *config) +{ + return ::cudaOccupancyMaxActiveClusters(numClusters, (const void*)func, config); +} + +#if defined __CUDACC__ + +/** + * \brief \hl Find out attributes for a given function + * + * This function obtains the attributes of a function specified via \p entry. + * The parameter \p entry must be a pointer to a function that executes + * on the device. The parameter specified by \p entry must be declared as a \p __global__ + * function. The fetched attributes are placed in \p attr. If the specified + * function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * + * Note that some function attributes such as + * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock" + * may vary based on the device that is currently being used. + * + * \param attr - Return pointer to function's attributes + * \param entry - Function to get attributes of + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost + */ +template +static __inline__ __host__ cudaError_t cudaFuncGetAttributes( + struct cudaFuncAttributes *attr, + T *entry +) +{ + return ::cudaFuncGetAttributes(attr, (const void*)entry); +} + +/** + * \brief \hl Set attributes for a given function + * + * This function sets the attributes of a function specified via \p entry. + * The parameter \p entry must be a pointer to a function that executes + * on the device. The parameter specified by \p entry must be declared as a \p __global__ + * function. The enumeration defined by \p attr is set to the value defined by \p value. + * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * If the specified attribute cannot be written, or if the value is incorrect, + * then ::cudaErrorInvalidValue is returned. + * + * Valid values for \p attr are: + * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. 
The sum of this value and the function attribute ::sharedSizeBytes + * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture. + * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * - ::cudaFuncAttributeRequiredClusterWidth: The required cluster width in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeRequiredClusterHeight: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeRequiredClusterDepth: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeClusterSchedulingPolicyPreference: The block + * scheduling policy of a function. The value type is cudaClusterSchedulingPolicy. + * + * \param entry - Function to get attributes of + * \param attr - Attribute to set + * \param value - Value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost + */ +template +static __inline__ __host__ cudaError_t cudaFuncSetAttribute( + T *entry, + enum cudaFuncAttribute attr, + int value +) +{ + return ::cudaFuncSetAttribute((const void*)entry, attr, value); +} + +/** + * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr + * + * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr. 
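+ *
+ * A minimal sketch (the kernel name is hypothetical):
+ * \code
+ * __global__ void myKernel(int *data);
+ *
+ * cudaKernel_t kernel;
+ * cudaGetKernel(&kernel, myKernel);
+ * \endcode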
+ * + * \param kernelPtr - Returns the device kernel + * \param entryFuncAddr - Address of device entry function to search kernel for + * + * \return + * ::cudaSuccess + * + * \sa + * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr) "cudaGetKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetKernel( + cudaKernel_t *kernelPtr, + const T *entryFuncAddr +) +{ + return ::cudaGetKernel(kernelPtr, (const void *)entryFuncAddr); +} + +#endif /* __CUDACC__ */ + +/** @} */ /* END CUDART_HIGHLEVEL */ + +#endif /* __cplusplus && !__CUDACC_RTC__ */ + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +#elif defined(_MSC_VER) +#pragma warning(pop) +#endif +#endif + +#undef __CUDA_DEPRECATED + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__ +#endif + +#endif /* !__CUDA_RUNTIME_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1aabe6987f23b4fd31b891fcfddc43e3aa6980d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h @@ -0,0 +1,13140 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 
12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + + +#if !defined(__CUDA_RUNTIME_API_H__) +#define __CUDA_RUNTIME_API_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_API_H__ +#endif + +/** + * \latexonly + * \page sync_async API synchronization behavior + * + * \section memcpy_sync_async_behavior Memcpy + * The API provides memcpy/memset functions in both synchronous and asynchronous forms, + * the latter having an \e "Async" suffix. This is a misnomer as each function + * may exhibit synchronous or asynchronous behavior depending on the arguments + * passed to the function. In the reference documentation, each memcpy function is + * categorized as \e synchronous or \e asynchronous, corresponding to the definitions + * below. + * + * \subsection MemcpySynchronousBehavior Synchronous + * + *
+ *   1. For transfers from pageable host memory to device memory, a stream sync is performed
+ * before the copy is initiated. The function will return once the pageable
+ * buffer has been copied to the staging memory for DMA transfer to device memory,
+ * but the DMA to final destination may not have completed.
+ *
+ *   2. For transfers from pinned host memory to device memory, the function is synchronous
+ * with respect to the host.
+ *
+ *   3. For transfers from device to either pageable or pinned host memory, the function returns
+ * only once the copy has completed.
+ *
+ *   4. For transfers from device memory to device memory, no host-side synchronization is
+ * performed.
+ *
+ *   5. For transfers from any host memory to any host memory, the function is fully
+ * synchronous with respect to the host.
+ *
+ * \subsection MemcpyAsynchronousBehavior Asynchronous
+ *
+ *   1. For transfers from device memory to pageable host memory, the function
+ * will return only once the copy has completed.
+ *
+ *   2. For transfers from any host memory to any host memory, the function is fully
+ * synchronous with respect to the host.
+ *
+ *   3. If pageable memory must first be staged to pinned memory, the driver may
+ * synchronize with the stream and stage the copy into pinned memory.
+ *
+ *   4. For all other transfers, the function should be fully asynchronous.
+ * + * \section memset_sync_async_behavior Memset + * The cudaMemset functions are asynchronous with respect to the host + * except when the target memory is pinned host memory. The \e Async + * versions are always asynchronous with respect to the host. + * + * \section kernel_launch_details Kernel Launches + * Kernel launches are asynchronous with respect to the host. Details of + * concurrent kernel execution and data transfers can be found in the CUDA + * Programmers Guide. + * + * \endlatexonly + */ + +/** + * There are two levels for the runtime API. + * + * The C API (cuda_runtime_api.h) is + * a C-style interface that does not require compiling with \p nvcc. + * + * The \ref CUDART_HIGHLEVEL "C++ API" (cuda_runtime.h) is a + * C++-style interface built on top of the C API. It wraps some of the + * C API routines, using overloading, references and default arguments. + * These wrappers can be used from C++ code and can be compiled with any C++ + * compiler. The C++ API also has some CUDA-specific wrappers that wrap + * C API routines that deal with symbols, textures, and device functions. + * These wrappers require the use of \p nvcc because they depend on code being + * generated by the compiler. For example, the execution configuration syntax + * to invoke kernels is only available in source code compiled with \p nvcc. + */ + +/** CUDA Runtime API Version */ +#define CUDART_VERSION 12010 + +#if defined(__CUDA_API_VER_MAJOR__) && defined(__CUDA_API_VER_MINOR__) +# define __CUDART_API_VERSION ((__CUDA_API_VER_MAJOR__ * 1000) + (__CUDA_API_VER_MINOR__ * 10)) +#else +# define __CUDART_API_VERSION CUDART_VERSION +#endif + +#ifndef __DOXYGEN_ONLY__ +#include "crt/host_defines.h" +#endif +#include "builtin_types.h" + +#include "cuda_device_runtime_api.h" + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) || defined(__CUDA_API_VERSION_INTERNAL) + #define __CUDART_API_PER_THREAD_DEFAULT_STREAM + #define __CUDART_API_PTDS(api) api ## _ptds + #define __CUDART_API_PTSZ(api) api ## _ptsz +#else + #define __CUDART_API_PTDS(api) api + #define __CUDART_API_PTSZ(api) api +#endif + +#define cudaSignalExternalSemaphoresAsync __CUDART_API_PTSZ(cudaSignalExternalSemaphoresAsync_v2) +#define cudaWaitExternalSemaphoresAsync __CUDART_API_PTSZ(cudaWaitExternalSemaphoresAsync_v2) + + #define cudaStreamGetCaptureInfo __CUDART_API_PTSZ(cudaStreamGetCaptureInfo_v2) + +#define cudaGetDeviceProperties cudaGetDeviceProperties_v2 + +#if defined(__CUDART_API_PER_THREAD_DEFAULT_STREAM) + #define cudaMemcpy __CUDART_API_PTDS(cudaMemcpy) + #define cudaMemcpyToSymbol __CUDART_API_PTDS(cudaMemcpyToSymbol) + #define cudaMemcpyFromSymbol __CUDART_API_PTDS(cudaMemcpyFromSymbol) + #define cudaMemcpy2D __CUDART_API_PTDS(cudaMemcpy2D) + #define cudaMemcpyToArray __CUDART_API_PTDS(cudaMemcpyToArray) + #define cudaMemcpy2DToArray __CUDART_API_PTDS(cudaMemcpy2DToArray) + #define cudaMemcpyFromArray __CUDART_API_PTDS(cudaMemcpyFromArray) + #define cudaMemcpy2DFromArray __CUDART_API_PTDS(cudaMemcpy2DFromArray) + #define cudaMemcpyArrayToArray __CUDART_API_PTDS(cudaMemcpyArrayToArray) + #define cudaMemcpy2DArrayToArray __CUDART_API_PTDS(cudaMemcpy2DArrayToArray) + #define cudaMemcpy3D __CUDART_API_PTDS(cudaMemcpy3D) + #define cudaMemcpy3DPeer __CUDART_API_PTDS(cudaMemcpy3DPeer) + #define cudaMemset __CUDART_API_PTDS(cudaMemset) + #define cudaMemset2D __CUDART_API_PTDS(cudaMemset2D) + #define cudaMemset3D __CUDART_API_PTDS(cudaMemset3D) + #define cudaGraphInstantiateWithParams __CUDART_API_PTSZ(cudaGraphInstantiateWithParams) + 
#define cudaGraphUpload __CUDART_API_PTSZ(cudaGraphUpload) + #define cudaGraphLaunch __CUDART_API_PTSZ(cudaGraphLaunch) + #define cudaStreamBeginCapture __CUDART_API_PTSZ(cudaStreamBeginCapture) + #define cudaStreamEndCapture __CUDART_API_PTSZ(cudaStreamEndCapture) + #define cudaStreamIsCapturing __CUDART_API_PTSZ(cudaStreamIsCapturing) + #define cudaMemcpyAsync __CUDART_API_PTSZ(cudaMemcpyAsync) + #define cudaMemcpyToSymbolAsync __CUDART_API_PTSZ(cudaMemcpyToSymbolAsync) + #define cudaMemcpyFromSymbolAsync __CUDART_API_PTSZ(cudaMemcpyFromSymbolAsync) + #define cudaMemcpy2DAsync __CUDART_API_PTSZ(cudaMemcpy2DAsync) + #define cudaMemcpyToArrayAsync __CUDART_API_PTSZ(cudaMemcpyToArrayAsync) + #define cudaMemcpy2DToArrayAsync __CUDART_API_PTSZ(cudaMemcpy2DToArrayAsync) + #define cudaMemcpyFromArrayAsync __CUDART_API_PTSZ(cudaMemcpyFromArrayAsync) + #define cudaMemcpy2DFromArrayAsync __CUDART_API_PTSZ(cudaMemcpy2DFromArrayAsync) + #define cudaMemcpy3DAsync __CUDART_API_PTSZ(cudaMemcpy3DAsync) + #define cudaMemcpy3DPeerAsync __CUDART_API_PTSZ(cudaMemcpy3DPeerAsync) + #define cudaMemsetAsync __CUDART_API_PTSZ(cudaMemsetAsync) + #define cudaMemset2DAsync __CUDART_API_PTSZ(cudaMemset2DAsync) + #define cudaMemset3DAsync __CUDART_API_PTSZ(cudaMemset3DAsync) + #define cudaStreamQuery __CUDART_API_PTSZ(cudaStreamQuery) + #define cudaStreamGetFlags __CUDART_API_PTSZ(cudaStreamGetFlags) + #define cudaStreamGetId __CUDART_API_PTSZ(cudaStreamGetId) + #define cudaStreamGetPriority __CUDART_API_PTSZ(cudaStreamGetPriority) + #define cudaEventRecord __CUDART_API_PTSZ(cudaEventRecord) + #define cudaEventRecordWithFlags __CUDART_API_PTSZ(cudaEventRecordWithFlags) + #define cudaStreamWaitEvent __CUDART_API_PTSZ(cudaStreamWaitEvent) + #define cudaStreamAddCallback __CUDART_API_PTSZ(cudaStreamAddCallback) + #define cudaStreamAttachMemAsync __CUDART_API_PTSZ(cudaStreamAttachMemAsync) + #define cudaStreamSynchronize __CUDART_API_PTSZ(cudaStreamSynchronize) + #define cudaLaunchKernel __CUDART_API_PTSZ(cudaLaunchKernel) + #define cudaLaunchKernelExC __CUDART_API_PTSZ(cudaLaunchKernelExC) + #define cudaLaunchHostFunc __CUDART_API_PTSZ(cudaLaunchHostFunc) + #define cudaMemPrefetchAsync __CUDART_API_PTSZ(cudaMemPrefetchAsync) + #define cudaLaunchCooperativeKernel __CUDART_API_PTSZ(cudaLaunchCooperativeKernel) + #define cudaStreamCopyAttributes __CUDART_API_PTSZ(cudaStreamCopyAttributes) + #define cudaStreamGetAttribute __CUDART_API_PTSZ(cudaStreamGetAttribute) + #define cudaStreamSetAttribute __CUDART_API_PTSZ(cudaStreamSetAttribute) + #define cudaMallocAsync __CUDART_API_PTSZ(cudaMallocAsync) + #define cudaFreeAsync __CUDART_API_PTSZ(cudaFreeAsync) + #define cudaMallocFromPoolAsync __CUDART_API_PTSZ(cudaMallocFromPoolAsync) + #define cudaGetDriverEntryPoint __CUDART_API_PTSZ(cudaGetDriverEntryPoint) +#endif + +/** \cond impl_private */ +#if !defined(__dv) + +#if defined(__cplusplus) + +#define __dv(v) \ + = v + +#else /* __cplusplus */ + +#define __dv(v) + +#endif /* __cplusplus */ + +#endif /* !__dv */ +/** \endcond impl_private */ + +#if (defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350)) /** Visible to SM>=3.5 and "__host__ __device__" only **/ + +#define CUDART_DEVICE __device__ + +#else + +#define CUDART_DEVICE + +#endif /** CUDART_DEVICE */ + +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) 
+#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** + * \defgroup CUDART_DEVICE Device Management + * + * ___MANBRIEF___ device management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the device management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Destroy all allocations and reset all state on the current device + * in the current process. + * + * Explicitly destroys and cleans up all resources associated with the current + * device in the current process. It is the caller's responsibility to ensure + * that the resources are not accessed or passed in subsequent API calls and + * doing so will result in undefined behavior. These resources include CUDA types + * such as ::cudaStream_t, ::cudaEvent_t, ::cudaArray_t, ::cudaMipmappedArray_t, + * ::cudaTextureObject_t, ::cudaSurfaceObject_t, ::textureReference, ::surfaceReference, + * ::cudaExternalMemory_t, ::cudaExternalSemaphore_t and ::cudaGraphicsResource_t. + * Any subsequent API call to this device will reinitialize the device. + * + * Note that this function will reset the device immediately. It is the caller's + * responsibility to ensure that the device is not being accessed by any + * other host threads from the process when this function is called. + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSynchronize + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceReset(void); + +/** + * \brief Wait for compute device to finish + * + * Blocks until the device has completed all preceding requested tasks. + * ::cudaDeviceSynchronize() returns an error if one of the preceding tasks + * has failed. If the ::cudaDeviceScheduleBlockingSync flag was set for + * this device, the host thread will block until the device has finished + * its work. + * + * \return + * ::cudaSuccess + * \note_device_sync_deprecated + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceReset, + * ::cuCtxSynchronize + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceSynchronize(void); + +/** + * \brief Set resource limits + * + * Setting \p limit to \p value is a request by the application to update + * the current limit maintained by the device. The driver is free to + * modify the requested value to meet h/w requirements (this could be + * clamping to minimum or maximum values, rounding up to nearest element + * size, etc). The application can use ::cudaDeviceGetLimit() to find out + * exactly what the limit has been set to. + * + * Setting each ::cudaLimit has its own specific restrictions, so each is + * discussed here. + * + * - ::cudaLimitStackSize controls the stack size in bytes of each GPU thread. + * + * - ::cudaLimitPrintfFifoSize controls the size in bytes of the shared FIFO + * used by the ::printf() device system call. Setting + * ::cudaLimitPrintfFifoSize must not be performed after launching any kernel + * that uses the ::printf() device system call - in such case + * ::cudaErrorInvalidValue will be returned. + * + * - ::cudaLimitMallocHeapSize controls the size in bytes of the heap used by + * the ::malloc() and ::free() device system calls. 
Setting + * ::cudaLimitMallocHeapSize must not be performed after launching any kernel + * that uses the ::malloc() or ::free() device system calls - in such case + * ::cudaErrorInvalidValue will be returned. + * + * - ::cudaLimitDevRuntimeSyncDepth controls the maximum nesting depth of a + * grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting + * this limit must be performed before any launch of a kernel that uses the + * device runtime and calls ::cudaDeviceSynchronize() above the default sync + * depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail + * with error code ::cudaErrorSyncDepthExceeded if the limitation is + * violated. This limit can be set smaller than the default or up the maximum + * launch depth of 24. When setting this limit, keep in mind that additional + * levels of sync depth require the runtime to reserve large amounts of + * device memory which can no longer be used for user allocations. If these + * reservations of device memory fail, ::cudaDeviceSetLimit will return + * ::cudaErrorMemoryAllocation, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability < 9.0. + * Attempting to set this limit on devices of other compute capability will + * results in error ::cudaErrorUnsupportedLimit being returned. + * + * - ::cudaLimitDevRuntimePendingLaunchCount controls the maximum number of + * outstanding device runtime launches that can be made from the current + * device. A grid is outstanding from the point of launch up until the grid + * is known to have been completed. Device runtime launches which violate + * this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when + * ::cudaGetLastError() is called after launch. If more pending launches than + * the default (2048 launches) are needed for a module using the device + * runtime, this limit can be increased. Keep in mind that being able to + * sustain additional pending launches will require the runtime to reserve + * larger amounts of device memory upfront which can no longer be used for + * allocations. If these reservations fail, ::cudaDeviceSetLimit will return + * ::cudaErrorMemoryAllocation, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability 3.5 and + * higher. Attempting to set this limit on devices of compute capability less + * than 3.5 will result in the error ::cudaErrorUnsupportedLimit being + * returned. + * + * - ::cudaLimitMaxL2FetchGranularity controls the L2 cache fetch granularity. + * Values can range from 0B to 128B. This is purely a performance hint and + * it can be ignored or clamped depending on the platform. + * + * - ::cudaLimitPersistingL2CacheSize controls size in bytes available + * for persisting L2 cache. This is purely a performance hint and it + * can be ignored or clamped depending on the platform. + * + * \param limit - Limit to set + * \param value - Size of limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceGetLimit, + * ::cuCtxSetLimit + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetLimit(enum cudaLimit limit, size_t value); + +/** + * \brief Return resource limits + * + * Returns in \p *pValue the current size of \p limit. The following ::cudaLimit values are supported. 
+ * - ::cudaLimitStackSize is the stack size in bytes of each GPU thread. + * - ::cudaLimitPrintfFifoSize is the size in bytes of the shared FIFO used by the + * ::printf() device system call. + * - ::cudaLimitMallocHeapSize is the size in bytes of the heap used by the + * ::malloc() and ::free() device system calls. + * - ::cudaLimitDevRuntimeSyncDepth is the maximum grid depth at which a + * thread can issue the device runtime call ::cudaDeviceSynchronize() + * to wait on child grid launches to complete. This functionality is removed + * for devices of compute capability >= 9.0, and hence will return error + * ::cudaErrorUnsupportedLimit on such devices. + * - ::cudaLimitDevRuntimePendingLaunchCount is the maximum number of outstanding + * device runtime launches. + * - ::cudaLimitMaxL2FetchGranularity is the L2 cache fetch granularity. + * - ::cudaLimitPersistingL2CacheSize is the persisting L2 cache size in bytes. + * + * \param limit - Limit to query + * \param pValue - Returned size of the limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceSetLimit, + * ::cuCtxGetLimit + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit); + +/** + * \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given element size. + * + * Returns in \p maxWidthInElements the maximum number of elements allocatable in a 1D linear texture + * for given format descriptor \p fmtDesc. + * + * \param maxWidthInElements - Returns maximum number of texture elements allocatable for given \p fmtDesc. + * \param fmtDesc - Texture format description. + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cuDeviceGetTexture1DLinearMaxWidth + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetTexture1DLinearMaxWidth(size_t *maxWidthInElements, const struct cudaChannelFormatDesc *fmtDesc, int device); +#endif + +/** + * \brief Returns the preferred cache configuration for the current device. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this returns through \p pCacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute functions. + * + * This will return a \p pCacheConfig of ::cudaFuncCachePreferNone on devices + * where the size of the L1 cache and shared memory are fixed.
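+ *
+ * As a non-normative usage sketch (error checking omitted), the current
+ * preference can be read back into a local variable:
+ * \code
+ * enum cudaFuncCache cfg;
+ * cudaDeviceGetCacheConfig(&cfg); // typically cudaFuncCachePreferNone unless a preference was set
+ * \endcode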
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory + * + * \param pCacheConfig - Returned cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig, + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * ::cuCtxGetCacheConfig + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig); + +/** + * \brief Returns numerical values that correspond to the least and + * greatest stream priorities. + * + * Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond + * to the least and greatest stream priorities respectively. Stream priorities + * follow a convention where lower numbers imply greater priorities. The range of + * meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority]. + * If the user attempts to create a stream with a priority value that is + * outside the meaningful range as specified by this API, the priority is + * automatically clamped down or up to either \p *leastPriority or \p *greatestPriority + * respectively. See ::cudaStreamCreateWithPriority for details on creating a + * priority stream. + * A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value + * is not desired. + * + * This function will return '0' in both \p *leastPriority and \p *greatestPriority if + * the current context's device does not support stream priorities + * (see ::cudaDeviceGetAttribute). + * + * \param leastPriority - Pointer to an int in which the numerical value for least + * stream priority is returned + * \param greatestPriority - Pointer to an int in which the numerical value for greatest + * stream priority is returned + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamGetPriority, + * ::cuCtxGetStreamPriorityRange + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetStreamPriorityRange(int *leastPriority, int *greatestPriority); + +/** + * \brief Sets the preferred cache configuration for the current device. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute the function. Any + * function preference set via + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)" + * or + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)" + * will be preferred over this device-wide setting. Setting the device-wide + * cache configuration to ::cudaFuncCachePreferNone will cause subsequent + * kernel launches to prefer to not change the cache configuration unless + * required to launch the kernel.
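+ *
+ * As an illustrative sketch only (return values ignored for brevity), an
+ * application whose kernels are shared-memory heavy might request:
+ * \code
+ * cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); // device-wide hint
+ * \endcode
+ * after which subsequent kernel launches prefer the larger shared memory
+ * configuration, unless a per-function cache preference overrides it.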
+ * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. + * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory + * + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceGetCacheConfig, + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * ::cuCtxSetCacheConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetCacheConfig(enum cudaFuncCache cacheConfig); + +/** + * \brief Returns the shared memory configuration for the current device. + * + * This function will return in \p pConfig the current size of shared memory banks + * on the current device. On devices with configurable shared memory banks, + * ::cudaDeviceSetSharedMemConfig can be used to change this setting, so that all + * subsequent kernel launches will by default use the new bank size. When + * ::cudaDeviceGetSharedMemConfig is called on devices without configurable shared + * memory, it will return the fixed bank size of the hardware. + * + * The returned bank configurations can be either: + * - ::cudaSharedMemBankSizeFourByte - shared memory bank width is four bytes. + * - ::cudaSharedMemBankSizeEightByte - shared memory bank width is eight bytes. + * + * \param pConfig - Returned cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig, + * ::cudaDeviceGetCacheConfig, + * ::cudaDeviceSetSharedMemConfig, + * ::cudaFuncSetCacheConfig, + * ::cuCtxGetSharedMemConfig + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig); + +/** + * \brief Sets the shared memory configuration for the current device. + * + * On devices with configurable shared memory banks, this function will set + * the shared memory bank size which is used for all subsequent kernel launches. + * Any per-function setting of shared memory set via ::cudaFuncSetSharedMemConfig + * will override the device wide setting. + * + * Changing the shared memory configuration between launches may introduce + * a device side synchronization point. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * The supported bank configurations are: + * - ::cudaSharedMemBankSizeDefault: set bank width the device default (currently, + * four bytes) + * - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be four bytes + * natively. 
+ * - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be eight + * bytes natively. + * + * \param config - Requested cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig, + * ::cudaDeviceGetCacheConfig, + * ::cudaDeviceGetSharedMemConfig, + * ::cudaFuncSetCacheConfig, + * ::cuCtxSetSharedMemConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetSharedMemConfig(enum cudaSharedMemConfig config); + +/** + * \brief Returns a handle to a compute device + * + * Returns in \p *device a device ordinal given a PCI bus ID string. + * + * \param device - Returned device ordinal + * + * \param pciBusId - String in one of the following forms: + * [domain]:[bus]:[device].[function] + * [domain]:[bus]:[device] + * [bus]:[device].[function] + * where \p domain, \p bus, \p device, and \p function are all hexadecimal values + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceGetPCIBusId, + * ::cuDeviceGetByPCIBusId + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetByPCIBusId(int *device, const char *pciBusId); + +/** + * \brief Returns a PCI Bus Id string for the device + * + * Returns an ASCII string identifying the device \p dev in the NULL-terminated + * string pointed to by \p pciBusId. \p len specifies the maximum length of the + * string that may be returned. + * + * \param pciBusId - Returned identifier string for the device in the following format + * [domain]:[bus]:[device].[function] + * where \p domain, \p bus, \p device, and \p function are all hexadecimal values. + * pciBusId should be large enough to store 13 characters including the NULL-terminator. + * + * \param len - Maximum length of string to store in \p name + * + * \param device - Device to get identifier string for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceGetByPCIBusId, + * ::cuDeviceGetPCIBusId + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetPCIBusId(char *pciBusId, int len, int device); + +/** + * \brief Gets an interprocess handle for a previously allocated event + * + * Takes as input a previously allocated event. This event must have been + * created with the ::cudaEventInterprocess and ::cudaEventDisableTiming + * flags set. This opaque handle may be copied into other processes and + * opened with ::cudaIpcOpenEventHandle to allow efficient hardware + * synchronization between GPU work in different processes. + * + * After the event has been been opened in the importing process, + * ::cudaEventRecord, ::cudaEventSynchronize, ::cudaStreamWaitEvent and + * ::cudaEventQuery may be used in either process. Performing operations + * on the imported event after the exported event has been freed + * with ::cudaEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. 
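+ *
+ * A minimal, non-normative sketch of the exporting side (error checking
+ * omitted; the handle is assumed to be shipped to the importing process over
+ * an existing IPC channel such as a pipe or shared file):
+ * \code
+ * cudaEvent_t event;
+ * cudaIpcEventHandle_t handle;
+ * cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventInterprocess);
+ * cudaIpcGetEventHandle(&handle, event); // copy 'handle' to the importing process
+ * \endcode
+ *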
+ * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param handle - Pointer to a user allocated cudaIpcEventHandle + * in which to return the opaque event handle + * \param event - Event allocated with ::cudaEventInterprocess and + * ::cudaEventDisableTiming flags. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaEventCreate, + * ::cudaEventDestroy, + * ::cudaEventSynchronize, + * ::cudaEventQuery, + * ::cudaStreamWaitEvent, + * ::cudaIpcOpenEventHandle, + * ::cudaIpcGetMemHandle, + * ::cudaIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle, + * ::cuIpcGetEventHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcGetEventHandle(cudaIpcEventHandle_t *handle, cudaEvent_t event); + +/** + * \brief Opens an interprocess event handle for use in the current process + * + * Opens an interprocess event handle exported from another process with + * ::cudaIpcGetEventHandle. This function returns a ::cudaEvent_t that behaves like + * a locally created event with the ::cudaEventDisableTiming flag specified. + * This event must be freed with ::cudaEventDestroy. + * + * Performing operations on the imported event after the exported event has + * been freed with ::cudaEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. + * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param event - Returns the imported event + * \param handle - Interprocess handle to open + * + * \returns + * ::cudaSuccess, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue, + * ::cudaErrorDeviceUninitialized + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaEventCreate, + * ::cudaEventDestroy, + * ::cudaEventSynchronize, + * ::cudaEventQuery, + * ::cudaStreamWaitEvent, + * ::cudaIpcGetEventHandle, + * ::cudaIpcGetMemHandle, + * ::cudaIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle, + * ::cuIpcOpenEventHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcOpenEventHandle(cudaEvent_t *event, cudaIpcEventHandle_t handle); + +/** + * \brief Gets an interprocess memory handle for an existing device memory + * allocation + * + * Takes a pointer to the base of an existing device memory allocation created + * with ::cudaMalloc and exports it for use in another process. This is a + * lightweight operation and may be called multiple times on an allocation + * without adverse effects. + * + * If a region of memory is freed with ::cudaFree and a subsequent call + * to ::cudaMalloc returns memory with the same device address, + * ::cudaIpcGetMemHandle will return a unique handle for the + * new memory. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. + * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param handle - Pointer to user allocated ::cudaIpcMemHandle to return + * the handle in. 
+ * \param devPtr - Base pointer to previously allocated device memory + * + * \returns + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMalloc, + * ::cudaFree, + * ::cudaIpcGetEventHandle, + * ::cudaIpcOpenEventHandle, + * ::cudaIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle, + * ::cuIpcGetMemHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcGetMemHandle(cudaIpcMemHandle_t *handle, void *devPtr); + +/** + * \brief Opens an interprocess memory handle exported from another process + * and returns a device pointer usable in the local process. + * + * Maps memory exported from another process with ::cudaIpcGetMemHandle into + * the current device address space. For contexts on different devices + * ::cudaIpcOpenMemHandle can attempt to enable peer access between the + * devices as if the user called ::cudaDeviceEnablePeerAccess. This behavior is + * controlled by the ::cudaIpcMemLazyEnablePeerAccess flag. + * ::cudaDeviceCanAccessPeer can determine if a mapping is possible. + * + * ::cudaIpcOpenMemHandle can open handles to devices that may not be visible + * in the process calling the API. + * + * Contexts that may open ::cudaIpcMemHandles are restricted in the following way. + * ::cudaIpcMemHandles from each device in a given process may only be opened + * by one context per device per other process. + * + * If the memory handle has already been opened by the current context, the + * reference count on the handle is incremented by 1 and the existing device pointer + * is returned. + * + * Memory returned from ::cudaIpcOpenMemHandle must be freed with + * ::cudaIpcCloseMemHandle. + * + * Calling ::cudaFree on an exported memory region before calling + * ::cudaIpcCloseMemHandle in the importing context will result in undefined + * behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. + * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param devPtr - Returned device pointer + * \param handle - ::cudaIpcMemHandle to open + * \param flags - Flags for this operation. Must be specified as ::cudaIpcMemLazyEnablePeerAccess + * + * \returns + * ::cudaSuccess, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorDeviceUninitialized, + * ::cudaErrorTooManyPeers, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \note No guarantees are made about the address returned in \p *devPtr. + * In particular, multiple processes may not receive the same address for the same \p handle. + * + * \sa + * ::cudaMalloc, + * ::cudaFree, + * ::cudaIpcGetEventHandle, + * ::cudaIpcOpenEventHandle, + * ::cudaIpcGetMemHandle, + * ::cudaIpcCloseMemHandle, + * ::cudaDeviceEnablePeerAccess, + * ::cudaDeviceCanAccessPeer, + * ::cuIpcOpenMemHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcOpenMemHandle(void **devPtr, cudaIpcMemHandle_t handle, unsigned int flags); + +/** + * \brief Attempts to close memory mapped with cudaIpcOpenMemHandle + * + * Decrements the reference count of the memory returnd by ::cudaIpcOpenMemHandle by 1. + * When the reference count reaches 0, this API unmaps the memory. 
The original allocation + * in the exporting process as well as imported mappings in other processes + * will be unaffected. + * + * Any resources used to enable peer access will be freed if this is the + * last mapping using them. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. + * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param devPtr - Device pointer returned by ::cudaIpcOpenMemHandle + * + * \returns + * ::cudaSuccess, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMalloc, + * ::cudaFree, + * ::cudaIpcGetEventHandle, + * ::cudaIpcOpenEventHandle, + * ::cudaIpcGetMemHandle, + * ::cudaIpcOpenMemHandle, + * ::cuIpcCloseMemHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcCloseMemHandle(void *devPtr); + +/** + * \brief Blocks until remote writes are visible to the specified scope + * + * Blocks until remote writes to the target context via mappings created + * through GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see + * https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are + * visible to the specified scope. + * + * If the scope equals or lies within the scope indicated by + * ::cudaDevAttrGPUDirectRDMAWritesOrdering, the call will be a no-op and + * can be safely omitted for performance. This can be determined by + * comparing the numerical values between the two enums, with smaller + * scopes having smaller values. + * + * Users may query support for this API via ::cudaDevAttrGPUDirectRDMAFlushWritesOptions. + * + * \param target - The target of the operation, see cudaFlushGPUDirectRDMAWritesTarget + * \param scope - The scope of the operation, see cudaFlushGPUDirectRDMAWritesScope + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotSupported, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cuFlushGPUDirectRDMAWrites + */ +#if __CUDART_API_VERSION >= 11030 +extern __host__ cudaError_t CUDARTAPI cudaDeviceFlushGPUDirectRDMAWrites(enum cudaFlushGPUDirectRDMAWritesTarget target, enum cudaFlushGPUDirectRDMAWritesScope scope); +#endif + +/** @} */ /* END CUDART_DEVICE */ + +/** + * \defgroup CUDART_THREAD_DEPRECATED Thread Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated thread management functions of the CUDA runtime + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes deprecated thread management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Exit and clean up from CUDA launches + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceReset(), which should be used + * instead. + * + * Explicitly destroys all cleans up all resources associated with the current + * device in the current process. Any subsequent API call to this device will + * reinitialize the device. + * + * Note that this function will reset the device immediately. It is the caller's + * responsibility to ensure that the device is not being accessed by any + * other host threads from the process when this function is called. 
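+ *
+ * In new code the non-deprecated equivalent should be used instead; as a
+ * minimal sketch:
+ * \code
+ * cudaDeviceReset(); // preferred replacement for the deprecated cudaThreadExit()
+ * \endcode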
+ * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceReset + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadExit(void); + +/** + * \brief Wait for compute device to finish + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is similar to the + * non-deprecated function ::cudaDeviceSynchronize(), which should be used + * instead. + * + * Blocks until the device has completed all preceding requested tasks. + * ::cudaThreadSynchronize() returns an error if one of the preceding tasks + * has failed. If the ::cudaDeviceScheduleBlockingSync flag was set for + * this device, the host thread will block until the device has finished + * its work. + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSynchronize + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadSynchronize(void); + +/** + * \brief Set resource limits + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceSetLimit(), which should be used + * instead. + * + * Setting \p limit to \p value is a request by the application to update + * the current limit maintained by the device. The driver is free to + * modify the requested value to meet h/w requirements (this could be + * clamping to minimum or maximum values, rounding up to nearest element + * size, etc). The application can use ::cudaThreadGetLimit() to find out + * exactly what the limit has been set to. + * + * Setting each ::cudaLimit has its own specific restrictions, so each is + * discussed here. + * + * - ::cudaLimitStackSize controls the stack size of each GPU thread. + * + * - ::cudaLimitPrintfFifoSize controls the size of the shared FIFO + * used by the ::printf() device system call. + * Setting ::cudaLimitPrintfFifoSize must be performed before + * launching any kernel that uses the ::printf() device + * system call, otherwise ::cudaErrorInvalidValue will be returned. + * + * - ::cudaLimitMallocHeapSize controls the size of the heap used + * by the ::malloc() and ::free() device system calls. Setting + * ::cudaLimitMallocHeapSize must be performed before launching + * any kernel that uses the ::malloc() or ::free() device system calls, + * otherwise ::cudaErrorInvalidValue will be returned. + * + * \param limit - Limit to set + * \param value - Size in bytes of limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetLimit + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadSetLimit(enum cudaLimit limit, size_t value); + +/** + * \brief Returns resource limits + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceGetLimit(), which should be used + * instead. + * + * Returns in \p *pValue the current size of \p limit. The supported + * ::cudaLimit values are: + * - ::cudaLimitStackSize: stack size of each GPU thread; + * - ::cudaLimitPrintfFifoSize: size of the shared FIFO used by the + * ::printf() device system call. 
+ * - ::cudaLimitMallocHeapSize: size of the heap used by the + * ::malloc() and ::free() device system calls; + * + * \param limit - Limit to query + * \param pValue - Returned size in bytes of limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceGetLimit + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadGetLimit(size_t *pValue, enum cudaLimit limit); + +/** + * \brief Returns the preferred cache configuration for the current device. + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceGetCacheConfig(), which should be + * used instead. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this returns through \p pCacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute functions. + * + * This will return a \p pCacheConfig of ::cudaFuncCachePreferNone on devices + * where the size of the L1 cache and shared memory are fixed. + * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param pCacheConfig - Returned cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceGetCacheConfig + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadGetCacheConfig(enum cudaFuncCache *pCacheConfig); + +/** + * \brief Sets the preferred cache configuration for the current device. + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceSetCacheConfig(), which should be + * used instead. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute the function. Any + * function preference set via + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)" + * or + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)" + * will be preferred over this device-wide setting. Setting the device-wide + * cache configuration to ::cudaFuncCachePreferNone will cause subsequent + * kernel launches to prefer to not change the cache configuration unless + * required to launch the kernel. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadSetCacheConfig(enum cudaFuncCache cacheConfig); + +/** @} */ /* END CUDART_THREAD_DEPRECATED */ + +/** + * \defgroup CUDART_ERROR Error Handling + * + * ___MANBRIEF___ error handling functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the error handling functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Returns the last error from a runtime call + * + * Returns the last error that has been produced by any of the runtime calls + * in the same instance of the CUDA Runtime library in the host thread and + * resets it to ::cudaSuccess. + * + * Note: Multiple instances of the CUDA Runtime library can be present in an + * application when using a library that statically links the CUDA Runtime. + * + * \return + * ::cudaSuccess, + * ::cudaErrorMissingConfiguration, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorInitializationError, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorUnmapBufferObjectFailed, + * ::cudaErrorInvalidDevicePointer, + * ::cudaErrorInvalidTexture, + * ::cudaErrorInvalidTextureBinding, + * ::cudaErrorInvalidChannelDescriptor, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorInvalidFilterSetting, + * ::cudaErrorInvalidNormSetting, + * ::cudaErrorUnknown, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInsufficientDriver, + * ::cudaErrorNoDevice, + * ::cudaErrorSetOnActiveProcess, + * ::cudaErrorStartupFailure, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaPeekAtLastError, ::cudaGetErrorName, ::cudaGetErrorString, ::cudaError + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void); + +/** + * \brief Returns the last error from a runtime call + * + * Returns the last error that has been produced by any of the runtime calls + * in the same instance of the CUDA Runtime library in the host thread. This + * call does not reset the error to ::cudaSuccess like ::cudaGetLastError(). + * + * Note: Multiple instances of the CUDA Runtime library can be present in an + * application when using a library that statically links the CUDA Runtime. 
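+ *
+ * As a non-normative illustration (assuming stdio.h is included), peeking
+ * leaves the error in place while ::cudaGetLastError() clears it:
+ * \code
+ * cudaError_t err = cudaPeekAtLastError();  // does not reset the error
+ * if (err != cudaSuccess)
+ *     printf("%s: %s\n", cudaGetErrorName(err), cudaGetErrorString(err));
+ * err = cudaGetLastError();                 // same error, now reset to cudaSuccess
+ * \endcode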
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorMissingConfiguration, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorInitializationError, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorUnmapBufferObjectFailed, + * ::cudaErrorInvalidDevicePointer, + * ::cudaErrorInvalidTexture, + * ::cudaErrorInvalidTextureBinding, + * ::cudaErrorInvalidChannelDescriptor, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorInvalidFilterSetting, + * ::cudaErrorInvalidNormSetting, + * ::cudaErrorUnknown, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInsufficientDriver, + * ::cudaErrorNoDevice, + * ::cudaErrorSetOnActiveProcess, + * ::cudaErrorStartupFailure, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetLastError, ::cudaGetErrorName, ::cudaGetErrorString, ::cudaError + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void); + +/** + * \brief Returns the string representation of an error code enum name + * + * Returns a string containing the name of an error code in the enum. If the error + * code is not recognized, "unrecognized error code" is returned. + * + * \param error - Error code to convert to string + * + * \return + * \p char* pointer to a NULL-terminated string + * + * \sa ::cudaGetErrorString, ::cudaGetLastError, ::cudaPeekAtLastError, ::cudaError, + * ::cuGetErrorName + */ +extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error); + +/** + * \brief Returns the description string for an error code + * + * Returns the description string for an error code. If the error + * code is not recognized, "unrecognized error code" is returned. + * + * \param error - Error code to convert to string + * + * \return + * \p char* pointer to a NULL-terminated string + * + * \sa ::cudaGetErrorName, ::cudaGetLastError, ::cudaPeekAtLastError, ::cudaError, + * ::cuGetErrorString + */ +extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error); +/** @} */ /* END CUDART_ERROR */ + +/** + * \addtogroup CUDART_DEVICE + * + * @{ + */ + +/** + * \brief Returns the number of compute-capable devices + * + * Returns in \p *count the number of devices with compute capability greater + * or equal to 2.0 that are available for execution. + * + * \param count - Returns the number of devices with compute capability + * greater or equal to 2.0 + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDevice, ::cudaSetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, + * ::cudaInitDevice, + * ::cuDeviceGetCount + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count); + +/** + * \brief Returns information about the compute-device + * + * Returns in \p *prop the properties of device \p dev. 
The ::cudaDeviceProp + * structure is defined as: + * \code + struct cudaDeviceProp { + char name[256]; + cudaUUID_t uuid; + size_t totalGlobalMem; + size_t sharedMemPerBlock; + int regsPerBlock; + int warpSize; + size_t memPitch; + int maxThreadsPerBlock; + int maxThreadsDim[3]; + int maxGridSize[3]; + int clockRate; + size_t totalConstMem; + int major; + int minor; + size_t textureAlignment; + size_t texturePitchAlignment; + int deviceOverlap; + int multiProcessorCount; + int kernelExecTimeoutEnabled; + int integrated; + int canMapHostMemory; + int computeMode; + int maxTexture1D; + int maxTexture1DMipmap; + int maxTexture1DLinear; + int maxTexture2D[2]; + int maxTexture2DMipmap[2]; + int maxTexture2DLinear[3]; + int maxTexture2DGather[2]; + int maxTexture3D[3]; + int maxTexture3DAlt[3]; + int maxTextureCubemap; + int maxTexture1DLayered[2]; + int maxTexture2DLayered[3]; + int maxTextureCubemapLayered[2]; + int maxSurface1D; + int maxSurface2D[2]; + int maxSurface3D[3]; + int maxSurface1DLayered[2]; + int maxSurface2DLayered[3]; + int maxSurfaceCubemap; + int maxSurfaceCubemapLayered[2]; + size_t surfaceAlignment; + int concurrentKernels; + int ECCEnabled; + int pciBusID; + int pciDeviceID; + int pciDomainID; + int tccDriver; + int asyncEngineCount; + int unifiedAddressing; + int memoryClockRate; + int memoryBusWidth; + int l2CacheSize; + int persistingL2CacheMaxSize; + int maxThreadsPerMultiProcessor; + int streamPrioritiesSupported; + int globalL1CacheSupported; + int localL1CacheSupported; + size_t sharedMemPerMultiprocessor; + int regsPerMultiprocessor; + int managedMemory; + int isMultiGpuBoard; + int multiGpuBoardGroupID; + int singleToDoublePrecisionPerfRatio; + int pageableMemoryAccess; + int concurrentManagedAccess; + int computePreemptionSupported; + int canUseHostPointerForRegisteredMem; + int cooperativeLaunch; + int cooperativeMultiDeviceLaunch; + int pageableMemoryAccessUsesHostPageTables; + int directManagedMemAccessFromHost; + int accessPolicyMaxWindowSize; + } + \endcode + * where: + * - \ref ::cudaDeviceProp::name "name[256]" is an ASCII string identifying + * the device. + * - \ref ::cudaDeviceProp::uuid "uuid" is a 16-byte unique identifier. + * - \ref ::cudaDeviceProp::totalGlobalMem "totalGlobalMem" is the total + * amount of global memory available on the device in bytes. + * - \ref ::cudaDeviceProp::sharedMemPerBlock "sharedMemPerBlock" is the + * maximum amount of shared memory available to a thread block in bytes. + * - \ref ::cudaDeviceProp::regsPerBlock "regsPerBlock" is the maximum number + * of 32-bit registers available to a thread block. + * - \ref ::cudaDeviceProp::warpSize "warpSize" is the warp size in threads. + * - \ref ::cudaDeviceProp::memPitch "memPitch" is the maximum pitch in + * bytes allowed by the memory copy functions that involve memory regions + * allocated through ::cudaMallocPitch(). + * - \ref ::cudaDeviceProp::maxThreadsPerBlock "maxThreadsPerBlock" is the + * maximum number of threads per block. + * - \ref ::cudaDeviceProp::maxThreadsDim "maxThreadsDim[3]" contains the + * maximum size of each dimension of a block. + * - \ref ::cudaDeviceProp::maxGridSize "maxGridSize[3]" contains the + * maximum size of each dimension of a grid. + * - \ref ::cudaDeviceProp::clockRate "clockRate" is the clock frequency in + * kilohertz. + * - \ref ::cudaDeviceProp::totalConstMem "totalConstMem" is the total amount + * of constant memory available on the device in bytes. 
+ * - \ref ::cudaDeviceProp::major "major", + * \ref ::cudaDeviceProp::minor "minor" are the major and minor revision + * numbers defining the device's compute capability. + * - \ref ::cudaDeviceProp::textureAlignment "textureAlignment" is the + * alignment requirement; texture base addresses that are aligned to + * \ref ::cudaDeviceProp::textureAlignment "textureAlignment" bytes do not + * need an offset applied to texture fetches. + * - \ref ::cudaDeviceProp::texturePitchAlignment "texturePitchAlignment" is the + * pitch alignment requirement for 2D texture references that are bound to + * pitched memory. + * - \ref ::cudaDeviceProp::deviceOverlap "deviceOverlap" is 1 if the device + * can concurrently copy memory between host and device while executing a + * kernel, or 0 if not. Deprecated, use instead asyncEngineCount. + * - \ref ::cudaDeviceProp::multiProcessorCount "multiProcessorCount" is the + * number of multiprocessors on the device. + * - \ref ::cudaDeviceProp::kernelExecTimeoutEnabled "kernelExecTimeoutEnabled" + * is 1 if there is a run time limit for kernels executed on the device, or + * 0 if not. + * - \ref ::cudaDeviceProp::integrated "integrated" is 1 if the device is an + * integrated (motherboard) GPU and 0 if it is a discrete (card) component. + * - \ref ::cudaDeviceProp::canMapHostMemory "canMapHostMemory" is 1 if the + * device can map host memory into the CUDA address space for use with + * ::cudaHostAlloc()/::cudaHostGetDevicePointer(), or 0 if not. + * - \ref ::cudaDeviceProp::computeMode "computeMode" is the compute mode + * that the device is currently in. Available modes are as follows: + * - cudaComputeModeDefault: Default mode - Device is not restricted and + * multiple threads can use ::cudaSetDevice() with this device. + * - cudaComputeModeProhibited: Compute-prohibited mode - No threads can use + * ::cudaSetDevice() with this device. + * - cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many + * threads in one process will be able to use ::cudaSetDevice() with this device. + *
When an occupied exclusive mode device is chosen with ::cudaSetDevice, + * all subsequent non-device management runtime functions will return + * ::cudaErrorDevicesUnavailable. + * - \ref ::cudaDeviceProp::maxTexture1D "maxTexture1D" is the maximum 1D + * texture size. + * - \ref ::cudaDeviceProp::maxTexture1DMipmap "maxTexture1DMipmap" is the maximum + * 1D mipmapped texture texture size. + * - \ref ::cudaDeviceProp::maxTexture1DLinear "maxTexture1DLinear" is the maximum + * 1D texture size for textures bound to linear memory. + * - \ref ::cudaDeviceProp::maxTexture2D "maxTexture2D[2]" contains the maximum + * 2D texture dimensions. + * - \ref ::cudaDeviceProp::maxTexture2DMipmap "maxTexture2DMipmap[2]" contains the + * maximum 2D mipmapped texture dimensions. + * - \ref ::cudaDeviceProp::maxTexture2DLinear "maxTexture2DLinear[3]" contains the + * maximum 2D texture dimensions for 2D textures bound to pitch linear memory. + * - \ref ::cudaDeviceProp::maxTexture2DGather "maxTexture2DGather[2]" contains the + * maximum 2D texture dimensions if texture gather operations have to be performed. + * - \ref ::cudaDeviceProp::maxTexture3D "maxTexture3D[3]" contains the maximum + * 3D texture dimensions. + * - \ref ::cudaDeviceProp::maxTexture3DAlt "maxTexture3DAlt[3]" + * contains the maximum alternate 3D texture dimensions. + * - \ref ::cudaDeviceProp::maxTextureCubemap "maxTextureCubemap" is the + * maximum cubemap texture width or height. + * - \ref ::cudaDeviceProp::maxTexture1DLayered "maxTexture1DLayered[2]" contains + * the maximum 1D layered texture dimensions. + * - \ref ::cudaDeviceProp::maxTexture2DLayered "maxTexture2DLayered[3]" contains + * the maximum 2D layered texture dimensions. + * - \ref ::cudaDeviceProp::maxTextureCubemapLayered "maxTextureCubemapLayered[2]" + * contains the maximum cubemap layered texture dimensions. + * - \ref ::cudaDeviceProp::maxSurface1D "maxSurface1D" is the maximum 1D + * surface size. + * - \ref ::cudaDeviceProp::maxSurface2D "maxSurface2D[2]" contains the maximum + * 2D surface dimensions. + * - \ref ::cudaDeviceProp::maxSurface3D "maxSurface3D[3]" contains the maximum + * 3D surface dimensions. + * - \ref ::cudaDeviceProp::maxSurface1DLayered "maxSurface1DLayered[2]" contains + * the maximum 1D layered surface dimensions. + * - \ref ::cudaDeviceProp::maxSurface2DLayered "maxSurface2DLayered[3]" contains + * the maximum 2D layered surface dimensions. + * - \ref ::cudaDeviceProp::maxSurfaceCubemap "maxSurfaceCubemap" is the maximum + * cubemap surface width or height. + * - \ref ::cudaDeviceProp::maxSurfaceCubemapLayered "maxSurfaceCubemapLayered[2]" + * contains the maximum cubemap layered surface dimensions. + * - \ref ::cudaDeviceProp::surfaceAlignment "surfaceAlignment" specifies the + * alignment requirements for surfaces. + * - \ref ::cudaDeviceProp::concurrentKernels "concurrentKernels" is 1 if the + * device supports executing multiple kernels within the same context + * simultaneously, or 0 if not. It is not guaranteed that multiple kernels + * will be resident on the device concurrently so this feature should not be + * relied upon for correctness. + * - \ref ::cudaDeviceProp::ECCEnabled "ECCEnabled" is 1 if the device has ECC + * support turned on, or 0 if not. + * - \ref ::cudaDeviceProp::pciBusID "pciBusID" is the PCI bus identifier of + * the device. + * - \ref ::cudaDeviceProp::pciDeviceID "pciDeviceID" is the PCI device + * (sometimes called slot) identifier of the device. 
+ * - \ref ::cudaDeviceProp::pciDomainID "pciDomainID" is the PCI domain identifier + * of the device. + * - \ref ::cudaDeviceProp::tccDriver "tccDriver" is 1 if the device is using a + * TCC driver or 0 if not. + * - \ref ::cudaDeviceProp::asyncEngineCount "asyncEngineCount" is 1 when the + * device can concurrently copy memory between host and device while executing + * a kernel. It is 2 when the device can concurrently copy memory between host + * and device in both directions and execute a kernel at the same time. It is + * 0 if neither of these is supported. + * - \ref ::cudaDeviceProp::unifiedAddressing "unifiedAddressing" is 1 if the device + * shares a unified address space with the host and 0 otherwise. + * - \ref ::cudaDeviceProp::memoryClockRate "memoryClockRate" is the peak memory + * clock frequency in kilohertz. + * - \ref ::cudaDeviceProp::memoryBusWidth "memoryBusWidth" is the memory bus width + * in bits. + * - \ref ::cudaDeviceProp::l2CacheSize "l2CacheSize" is L2 cache size in bytes. + * - \ref ::cudaDeviceProp::persistingL2CacheMaxSize "persistingL2CacheMaxSize" is L2 cache's maximum persisting lines size in bytes. + * - \ref ::cudaDeviceProp::maxThreadsPerMultiProcessor "maxThreadsPerMultiProcessor" + * is the number of maximum resident threads per multiprocessor. + * - \ref ::cudaDeviceProp::streamPrioritiesSupported "streamPrioritiesSupported" + * is 1 if the device supports stream priorities, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::globalL1CacheSupported "globalL1CacheSupported" + * is 1 if the device supports caching of globals in L1 cache, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::localL1CacheSupported "localL1CacheSupported" + * is 1 if the device supports caching of locals in L1 cache, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::sharedMemPerMultiprocessor "sharedMemPerMultiprocessor" is the + * maximum amount of shared memory available to a multiprocessor in bytes; this amount is + * shared by all thread blocks simultaneously resident on a multiprocessor. + * - \ref ::cudaDeviceProp::regsPerMultiprocessor "regsPerMultiprocessor" is the maximum number + * of 32-bit registers available to a multiprocessor; this number is shared + * by all thread blocks simultaneously resident on a multiprocessor. + * - \ref ::cudaDeviceProp::managedMemory "managedMemory" + * is 1 if the device supports allocating managed memory on this system, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::isMultiGpuBoard "isMultiGpuBoard" + * is 1 if the device is on a multi-GPU board (e.g. Gemini cards), and 0 if not; + * - \ref ::cudaDeviceProp::multiGpuBoardGroupID "multiGpuBoardGroupID" is a unique identifier + * for a group of devices associated with the same board. + * Devices on the same multi-GPU board will share the same identifier. + * - \ref ::cudaDeviceProp::hostNativeAtomicSupported "hostNativeAtomicSupported" + * is 1 if the link between the device and the host supports native atomic operations, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::singleToDoublePrecisionPerfRatio "singleToDoublePrecisionPerfRatio" + * is the ratio of single precision performance (in floating-point operations per second) + * to double precision performance. + * - \ref ::cudaDeviceProp::pageableMemoryAccess "pageableMemoryAccess" is 1 if the device supports + * coherently accessing pageable memory without calling cudaHostRegister on it, and 0 otherwise. 
+ * - \ref ::cudaDeviceProp::concurrentManagedAccess "concurrentManagedAccess" is 1 if the device can + * coherently access managed memory concurrently with the CPU, and 0 otherwise. + * - \ref ::cudaDeviceProp::computePreemptionSupported "computePreemptionSupported" is 1 if the device + * supports Compute Preemption, and 0 otherwise. + * - \ref ::cudaDeviceProp::canUseHostPointerForRegisteredMem "canUseHostPointerForRegisteredMem" is 1 if + * the device can access host registered memory at the same virtual address as the CPU, and 0 otherwise. + * - \ref ::cudaDeviceProp::cooperativeLaunch "cooperativeLaunch" is 1 if the device supports launching + * cooperative kernels via ::cudaLaunchCooperativeKernel, and 0 otherwise. + * - \ref ::cudaDeviceProp::cooperativeMultiDeviceLaunch "cooperativeMultiDeviceLaunch" is 1 if the device + * supports launching cooperative kernels via ::cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise. + * - \ref ::cudaDeviceProp::sharedMemPerBlockOptin "sharedMemPerBlockOptin" + * is the per device maximum shared memory per block usable by special opt in + * - \ref ::cudaDeviceProp::pageableMemoryAccessUsesHostPageTables "pageableMemoryAccessUsesHostPageTables" is 1 if the device accesses + * pageable memory via the host's page tables, and 0 otherwise. + * - \ref ::cudaDeviceProp::directManagedMemAccessFromHost "directManagedMemAccessFromHost" is 1 if the host can directly access managed + * memory on the device without migration, and 0 otherwise. + * - \ref ::cudaDeviceProp::maxBlocksPerMultiProcessor "maxBlocksPerMultiProcessor" is the maximum number of thread blocks + * that can reside on a multiprocessor. + * - \ref ::cudaDeviceProp::accessPolicyMaxWindowSize "accessPolicyMaxWindowSize" is + * the maximum value of ::cudaAccessPolicyWindow::num_bytes. + * - \ref ::cudaDeviceProp::reservedSharedMemPerBlock "reservedSharedMemPerBlock" + * is the shared memory reserved by CUDA driver per block in bytes + * - \ref ::cudaDeviceProp::hostRegisterSupported "hostRegisterSupported" + * is 1 if the device supports host memory registration via ::cudaHostRegister, and 0 otherwise. 
+ * - \ref ::cudaDeviceProp::sparseCudaArraySupported "sparseCudaArraySupported" + * is 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise + * - \ref ::cudaDeviceProp::hostRegisterReadOnlySupported "hostRegisterReadOnlySupported" + * is 1 if the device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as + * read-only to the GPU + * - \ref ::cudaDeviceProp::timelineSemaphoreInteropSupported "timelineSemaphoreInteropSupported" + * is 1 if external timeline semaphore interop is supported on the device, 0 otherwise + * - \ref ::cudaDeviceProp::memoryPoolsSupported "memoryPoolsSupported" + * is 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise + * - \ref ::cudaDeviceProp::gpuDirectRDMASupported "gpuDirectRDMASupported" + * is 1 if the device supports GPUDirect RDMA APIs, 0 otherwise + * - \ref ::cudaDeviceProp::gpuDirectRDMAFlushWritesOptions "gpuDirectRDMAFlushWritesOptions" + * is a bitmask to be interpreted according to the ::cudaFlushGPUDirectRDMAWritesOptions enum + * - \ref ::cudaDeviceProp::gpuDirectRDMAWritesOrdering "gpuDirectRDMAWritesOrdering" + * See the ::cudaGPUDirectRDMAWritesOrdering enum for numerical values + * - \ref ::cudaDeviceProp::memoryPoolSupportedHandleTypes "memoryPoolSupportedHandleTypes" + * is a bitmask of handle types supported with mempool-based IPC + * - \ref ::cudaDeviceProp::deferredMappingCudaArraySupported "deferredMappingCudaArraySupported" + * is 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays + * - \ref ::cudaDeviceProp::ipcEventSupported "ipcEventSupported" + * is 1 if the device supports IPC Events, and 0 otherwise + * - \ref ::cudaDeviceProp::unifiedFunctionPointers "unifiedFunctionPointers" + * is 1 if the device support unified pointers, and 0 otherwise + * + * \param prop - Properties for the specified device + * \param device - Device number to get properties for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, ::cudaChooseDevice, + * ::cudaDeviceGetAttribute, + * ::cudaInitDevice, + * ::cuDeviceGetAttribute, + * ::cuDeviceGetName + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceProperties(struct cudaDeviceProp *prop, int device); + +/** + * \brief Returns information about the device + * + * Returns in \p *value the integer value of the attribute \p attr on device + * \p device. 
The supported attributes are: + * - ::cudaDevAttrMaxThreadsPerBlock: Maximum number of threads per block + * - ::cudaDevAttrMaxBlockDimX: Maximum x-dimension of a block + * - ::cudaDevAttrMaxBlockDimY: Maximum y-dimension of a block + * - ::cudaDevAttrMaxBlockDimZ: Maximum z-dimension of a block + * - ::cudaDevAttrMaxGridDimX: Maximum x-dimension of a grid + * - ::cudaDevAttrMaxGridDimY: Maximum y-dimension of a grid + * - ::cudaDevAttrMaxGridDimZ: Maximum z-dimension of a grid + * - ::cudaDevAttrMaxSharedMemoryPerBlock: Maximum amount of shared memory + * available to a thread block in bytes + * - ::cudaDevAttrTotalConstantMemory: Memory available on device for + * __constant__ variables in a CUDA C kernel in bytes + * - ::cudaDevAttrWarpSize: Warp size in threads + * - ::cudaDevAttrMaxPitch: Maximum pitch in bytes allowed by the memory copy + * functions that involve memory regions allocated through ::cudaMallocPitch() + * - ::cudaDevAttrMaxTexture1DWidth: Maximum 1D texture width + * - ::cudaDevAttrMaxTexture1DLinearWidth: Maximum width for a 1D texture bound + * to linear memory + * - ::cudaDevAttrMaxTexture1DMipmappedWidth: Maximum mipmapped 1D texture width + * - ::cudaDevAttrMaxTexture2DWidth: Maximum 2D texture width + * - ::cudaDevAttrMaxTexture2DHeight: Maximum 2D texture height + * - ::cudaDevAttrMaxTexture2DLinearWidth: Maximum width for a 2D texture + * bound to linear memory + * - ::cudaDevAttrMaxTexture2DLinearHeight: Maximum height for a 2D texture + * bound to linear memory + * - ::cudaDevAttrMaxTexture2DLinearPitch: Maximum pitch in bytes for a 2D + * texture bound to linear memory + * - ::cudaDevAttrMaxTexture2DMipmappedWidth: Maximum mipmapped 2D texture + * width + * - ::cudaDevAttrMaxTexture2DMipmappedHeight: Maximum mipmapped 2D texture + * height + * - ::cudaDevAttrMaxTexture3DWidth: Maximum 3D texture width + * - ::cudaDevAttrMaxTexture3DHeight: Maximum 3D texture height + * - ::cudaDevAttrMaxTexture3DDepth: Maximum 3D texture depth + * - ::cudaDevAttrMaxTexture3DWidthAlt: Alternate maximum 3D texture width, + * 0 if no alternate maximum 3D texture size is supported + * - ::cudaDevAttrMaxTexture3DHeightAlt: Alternate maximum 3D texture height, + * 0 if no alternate maximum 3D texture size is supported + * - ::cudaDevAttrMaxTexture3DDepthAlt: Alternate maximum 3D texture depth, + * 0 if no alternate maximum 3D texture size is supported + * - ::cudaDevAttrMaxTextureCubemapWidth: Maximum cubemap texture width or + * height + * - ::cudaDevAttrMaxTexture1DLayeredWidth: Maximum 1D layered texture width + * - ::cudaDevAttrMaxTexture1DLayeredLayers: Maximum layers in a 1D layered + * texture + * - ::cudaDevAttrMaxTexture2DLayeredWidth: Maximum 2D layered texture width + * - ::cudaDevAttrMaxTexture2DLayeredHeight: Maximum 2D layered texture height + * - ::cudaDevAttrMaxTexture2DLayeredLayers: Maximum layers in a 2D layered + * texture + * - ::cudaDevAttrMaxTextureCubemapLayeredWidth: Maximum cubemap layered + * texture width or height + * - ::cudaDevAttrMaxTextureCubemapLayeredLayers: Maximum layers in a cubemap + * layered texture + * - ::cudaDevAttrMaxSurface1DWidth: Maximum 1D surface width + * - ::cudaDevAttrMaxSurface2DWidth: Maximum 2D surface width + * - ::cudaDevAttrMaxSurface2DHeight: Maximum 2D surface height + * - ::cudaDevAttrMaxSurface3DWidth: Maximum 3D surface width + * - ::cudaDevAttrMaxSurface3DHeight: Maximum 3D surface height + * - ::cudaDevAttrMaxSurface3DDepth: Maximum 3D surface depth + * - ::cudaDevAttrMaxSurface1DLayeredWidth: Maximum 1D layered 
surface width + * - ::cudaDevAttrMaxSurface1DLayeredLayers: Maximum layers in a 1D layered + * surface + * - ::cudaDevAttrMaxSurface2DLayeredWidth: Maximum 2D layered surface width + * - ::cudaDevAttrMaxSurface2DLayeredHeight: Maximum 2D layered surface height + * - ::cudaDevAttrMaxSurface2DLayeredLayers: Maximum layers in a 2D layered + * surface + * - ::cudaDevAttrMaxSurfaceCubemapWidth: Maximum cubemap surface width + * - ::cudaDevAttrMaxSurfaceCubemapLayeredWidth: Maximum cubemap layered + * surface width + * - ::cudaDevAttrMaxSurfaceCubemapLayeredLayers: Maximum layers in a cubemap + * layered surface + * - ::cudaDevAttrMaxRegistersPerBlock: Maximum number of 32-bit registers + * available to a thread block + * - ::cudaDevAttrClockRate: Peak clock frequency in kilohertz + * - ::cudaDevAttrTextureAlignment: Alignment requirement; texture base + * addresses aligned to ::textureAlign bytes do not need an offset applied + * to texture fetches + * - ::cudaDevAttrTexturePitchAlignment: Pitch alignment requirement for 2D + * texture references bound to pitched memory + * - ::cudaDevAttrGpuOverlap: 1 if the device can concurrently copy memory + * between host and device while executing a kernel, or 0 if not + * - ::cudaDevAttrMultiProcessorCount: Number of multiprocessors on the device + * - ::cudaDevAttrKernelExecTimeout: 1 if there is a run time limit for kernels + * executed on the device, or 0 if not + * - ::cudaDevAttrIntegrated: 1 if the device is integrated with the memory + * subsystem, or 0 if not + * - ::cudaDevAttrCanMapHostMemory: 1 if the device can map host memory into + * the CUDA address space, or 0 if not + * - ::cudaDevAttrComputeMode: Compute mode is the compute mode that the device + * is currently in. Available modes are as follows: + * - ::cudaComputeModeDefault: Default mode - Device is not restricted and + * multiple threads can use ::cudaSetDevice() with this device. + * - ::cudaComputeModeProhibited: Compute-prohibited mode - No threads can use + * ::cudaSetDevice() with this device. + * - ::cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many + * threads in one process will be able to use ::cudaSetDevice() with this + * device. + * - ::cudaDevAttrConcurrentKernels: 1 if the device supports executing + * multiple kernels within the same context simultaneously, or 0 if + * not. It is not guaranteed that multiple kernels will be resident on the + * device concurrently so this feature should not be relied upon for + * correctness. + * - ::cudaDevAttrEccEnabled: 1 if error correction is enabled on the device, + * 0 if error correction is disabled or not supported by the device + * - ::cudaDevAttrPciBusId: PCI bus identifier of the device + * - ::cudaDevAttrPciDeviceId: PCI device (also known as slot) identifier of + * the device + * - ::cudaDevAttrTccDriver: 1 if the device is using a TCC driver. TCC is only + * available on Tesla hardware running Windows Vista or later. + * - ::cudaDevAttrMemoryClockRate: Peak memory clock frequency in kilohertz + * - ::cudaDevAttrGlobalMemoryBusWidth: Global memory bus width in bits + * - ::cudaDevAttrL2CacheSize: Size of L2 cache in bytes. 0 if the device + * doesn't have L2 cache. 
+ * - ::cudaDevAttrMaxThreadsPerMultiProcessor: Maximum resident threads per + * multiprocessor + * - ::cudaDevAttrUnifiedAddressing: 1 if the device shares a unified address + * space with the host, or 0 if not + * - ::cudaDevAttrComputeCapabilityMajor: Major compute capability version + * number + * - ::cudaDevAttrComputeCapabilityMinor: Minor compute capability version + * number + * - ::cudaDevAttrStreamPrioritiesSupported: 1 if the device supports stream + * priorities, or 0 if not + * - ::cudaDevAttrGlobalL1CacheSupported: 1 if device supports caching globals + * in L1 cache, 0 if not + * - ::cudaDevAttrLocalL1CacheSupported: 1 if device supports caching locals + * in L1 cache, 0 if not + * - ::cudaDevAttrMaxSharedMemoryPerMultiprocessor: Maximum amount of shared memory + * available to a multiprocessor in bytes; this amount is shared by all + * thread blocks simultaneously resident on a multiprocessor + * - ::cudaDevAttrMaxRegistersPerMultiprocessor: Maximum number of 32-bit registers + * available to a multiprocessor; this number is shared by all thread blocks + * simultaneously resident on a multiprocessor + * - ::cudaDevAttrManagedMemory: 1 if device supports allocating + * managed memory, 0 if not + * - ::cudaDevAttrIsMultiGpuBoard: 1 if device is on a multi-GPU board, 0 if not + * - ::cudaDevAttrMultiGpuBoardGroupID: Unique identifier for a group of devices on the + * same multi-GPU board + * - ::cudaDevAttrHostNativeAtomicSupported: 1 if the link between the device and the + * host supports native atomic operations + * - ::cudaDevAttrSingleToDoublePrecisionPerfRatio: Ratio of single precision performance + * (in floating-point operations per second) to double precision performance + * - ::cudaDevAttrPageableMemoryAccess: 1 if the device supports coherently accessing + * pageable memory without calling cudaHostRegister on it, and 0 otherwise + * - ::cudaDevAttrConcurrentManagedAccess: 1 if the device can coherently access managed + * memory concurrently with the CPU, and 0 otherwise + * - ::cudaDevAttrComputePreemptionSupported: 1 if the device supports + * Compute Preemption, 0 if not + * - ::cudaDevAttrCanUseHostPointerForRegisteredMem: 1 if the device can access host + * registered memory at the same virtual address as the CPU, and 0 otherwise + * - ::cudaDevAttrCooperativeLaunch: 1 if the device supports launching cooperative kernels + * via ::cudaLaunchCooperativeKernel, and 0 otherwise + * - ::cudaDevAttrCooperativeMultiDeviceLaunch: 1 if the device supports launching cooperative + * kernels via ::cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise + * - ::cudaDevAttrCanFlushRemoteWrites: 1 if the device supports flushing of outstanding + * remote writes, and 0 otherwise + * - ::cudaDevAttrHostRegisterSupported: 1 if the device supports host memory registration + * via ::cudaHostRegister, and 0 otherwise + * - ::cudaDevAttrPageableMemoryAccessUsesHostPageTables: 1 if the device accesses pageable memory via the + * host's page tables, and 0 otherwise + * - ::cudaDevAttrDirectManagedMemAccessFromHost: 1 if the host can directly access managed memory on the device + * without migration, and 0 otherwise + * - ::cudaDevAttrMaxSharedMemoryPerBlockOptin: Maximum per block shared memory size on the device. 
This value can + * be opted into when using ::cudaFuncSetAttribute + * - ::cudaDevAttrMaxBlocksPerMultiprocessor: Maximum number of thread blocks that can reside on a multiprocessor + * - ::cudaDevAttrMaxPersistingL2CacheSize: Maximum L2 persisting lines capacity setting in bytes + * - ::cudaDevAttrMaxAccessPolicyWindowSize: Maximum value of cudaAccessPolicyWindow::num_bytes + * - ::cudaDevAttrReservedSharedMemoryPerBlock: Shared memory reserved by CUDA driver per block in bytes + * - ::cudaDevAttrSparseCudaArraySupported: 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays. + * - ::cudaDevAttrHostRegisterReadOnlySupported: Device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly + * to register memory that must be mapped as read-only to the GPU + * - ::cudaDevAttrMemoryPoolsSupported: 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, and 0 otherwise + * - ::cudaDevAttrGPUDirectRDMASupported: 1 if the device supports GPUDirect RDMA APIs, and 0 otherwise + * - ::cudaDevAttrGPUDirectRDMAFlushWritesOptions: bitmask to be interpreted according to the ::cudaFlushGPUDirectRDMAWritesOptions enum + * - ::cudaDevAttrGPUDirectRDMAWritesOrdering: see the ::cudaGPUDirectRDMAWritesOrdering enum for numerical values + * - ::cudaDevAttrMemoryPoolSupportedHandleTypes: Bitmask of handle types supported with mempool based IPC + * - ::cudaDevAttrDeferredMappingCudaArraySupported : 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays. + * - ::cudaDevAttrIpcEventSupport: 1 if the device supports IPC Events. + * + * \param value - Returned device attribute value + * \param attr - Device attribute to query + * \param device - Device number to query + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, ::cudaChooseDevice, + * ::cudaGetDeviceProperties, + * ::cudaInitDevice, + * ::cuDeviceGetAttribute + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device); + +/** + * \brief Returns the default mempool of a device + * + * The default mempool of a device contains device memory from that device. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * ::cudaErrorNotSupported + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cuDeviceGetDefaultMemPool, ::cudaMallocAsync, ::cudaMemPoolTrimTo, ::cudaMemPoolGetAttribute, ::cudaDeviceSetMemPool, ::cudaMemPoolSetAttribute, ::cudaMemPoolSetAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetDefaultMemPool(cudaMemPool_t *memPool, int device); + + +/** + * \brief Sets the current memory pool of a device + * + * The memory pool must be local to the specified device. + * Unless a mempool is specified in the ::cudaMallocAsync call, + * ::cudaMallocAsync allocates from the current mempool of the provided stream's device. + * By default, a device's current memory pool is its default memory pool. + * + * \note Use ::cudaMallocFromPoolAsync to specify asynchronous allocations from a device different + * than the one the stream runs on. 
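+ *
+ * A minimal usage sketch, assuming device 0 is the current device (the pool
+ * properties shown are just one possible configuration; error checking omitted):
+ * \code
+   cudaMemPoolProps props = {};
+   props.allocType     = cudaMemAllocationTypePinned;
+   props.location.type = cudaMemLocationTypeDevice;
+   props.location.id   = 0;                      // pool backed by device 0 memory
+
+   cudaMemPool_t pool;
+   cudaMemPoolCreate(&pool, &props);
+   cudaDeviceSetMemPool(0, pool);                // make it device 0's current pool
+
+   cudaStream_t stream;
+   cudaStreamCreate(&stream);                    // stream on the current device (0)
+
+   void *ptr;
+   cudaMallocAsync(&ptr, 1 << 20, stream);       // now serviced by 'pool'
+   cudaFreeAsync(ptr, stream);
+ * \endcode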
+ * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorInvalidDevice + * ::cudaErrorNotSupported + * \notefnerr + * \note_callback + * + * \sa ::cuDeviceSetMemPool, ::cudaDeviceGetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaMemPoolCreate, ::cudaMemPoolDestroy, ::cudaMallocFromPoolAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetMemPool(int device, cudaMemPool_t memPool); + +/** + * \brief Gets the current mempool for a device + * + * Returns the last pool provided to ::cudaDeviceSetMemPool for this device + * or the device's default memory pool if ::cudaDeviceSetMemPool has never been called. + * By default the current mempool is the default mempool for a device, + * otherwise the returned pool must have been set with ::cuDeviceSetMemPool or ::cudaDeviceSetMemPool. + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorNotSupported + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cuDeviceGetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceSetMemPool + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetMemPool(cudaMemPool_t *memPool, int device); + +/** + * \brief Return NvSciSync attributes that this device can support. + * + * Returns in \p nvSciSyncAttrList, the properties of NvSciSync that + * this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList + * can be used to create an NvSciSync that matches this device's capabilities. + * + * If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is + * already set this API will return ::cudaErrorInvalidValue. + * + * The applications should set \p nvSciSyncAttrList to a valid + * NvSciSyncAttrList failing which this API will return + * ::cudaErrorInvalidHandle. + * + * The \p flags controls how applications intends to use + * the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are: + * - ::cudaNvSciSyncAttrSignal, specifies that the applications intends to + * signal an NvSciSync on this CUDA device. + * - ::cudaNvSciSyncAttrWait, specifies that the applications intends to + * wait on an NvSciSync on this CUDA device. + * + * At least one of these flags must be set, failing which the API + * returns ::cudaErrorInvalidValue. Both the flags are orthogonal + * to one another: a developer may set both these flags that allows to + * set both wait and signal specific attributes in the same \p nvSciSyncAttrList. + * + * Note that this API updates the input \p nvSciSyncAttrList with values equivalent + * to the following public attribute key-values: + * NvSciSyncAttrKey_RequiredPerm is set to + * - NvSciSyncAccessPerm_SignalOnly if ::cudaNvSciSyncAttrSignal is set in \p flags. + * - NvSciSyncAccessPerm_WaitOnly if ::cudaNvSciSyncAttrWait is set in \p flags. + * - NvSciSyncAccessPerm_WaitSignal if both ::cudaNvSciSyncAttrWait and + * ::cudaNvSciSyncAttrSignal are set in \p flags. + * NvSciSyncAttrKey_PrimitiveInfo is set to + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid \p device. + * - NvSciSyncAttrValPrimitiveType_Syncpoint if \p device is a Tegra device. + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if \p device is GA10X+. + * NvSciSyncAttrKey_GpuId is set to the same UUID that is returned in + * \p cudaDeviceProp.uuid from ::cudaDeviceGetProperties for this \p device. + * + * \param nvSciSyncAttrList - Return NvSciSync attributes supported. + * \param device - Valid Cuda Device to get NvSciSync attributes for. + * \param flags - flags describing NvSciSync usage. 
+ * + * \return + * + * ::cudaSuccess, + * ::cudaErrorDeviceUninitialized, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidHandle, + * ::cudaErrorInvalidDevice, + * ::cudaErrorNotSupported, + * ::cudaErrorMemoryAllocation + * + * \sa + * ::cudaImportExternalSemaphore, + * ::cudaDestroyExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetNvSciSyncAttributes(void *nvSciSyncAttrList, int device, int flags); + +/** + * \brief Queries attributes of the link between two devices. + * + * Returns in \p *value the value of the requested attribute \p attrib of the + * link between \p srcDevice and \p dstDevice. The supported attributes are: + * - ::cudaDevP2PAttrPerformanceRank: A relative value indicating the + * performance of the link between two devices. Lower value means better + * performance (0 being the value used for most performant link). + * - ::cudaDevP2PAttrAccessSupported: 1 if peer access is enabled. + * - ::cudaDevP2PAttrNativeAtomicSupported: 1 if native atomic operations over + * the link are supported. + * - ::cudaDevP2PAttrCudaArrayAccessSupported: 1 if accessing CUDA arrays over + * the link is supported. + * + * Returns ::cudaErrorInvalidDevice if \p srcDevice or \p dstDevice are not valid + * or if they represent the same device. + * + * Returns ::cudaErrorInvalidValue if \p attrib is not valid or if \p value is + * a null pointer. + * + * \param value - Returned value of the requested attribute + * \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice. + * \param srcDevice - The source device of the target link. + * \param dstDevice - The destination device of the target link. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceEnablePeerAccess, + * ::cudaDeviceDisablePeerAccess, + * ::cudaDeviceCanAccessPeer, + * ::cuDeviceGetP2PAttribute + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetP2PAttribute(int *value, enum cudaDeviceP2PAttr attr, int srcDevice, int dstDevice); + +/** + * \brief Select compute-device which best matches criteria + * + * Returns in \p *device the device which has properties that best match + * \p *prop. + * + * \param device - Device with best match + * \param prop - Desired device properties + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, + * ::cudaGetDeviceProperties, + * ::cudaInitDevice + */ +extern __host__ cudaError_t CUDARTAPI cudaChooseDevice(int *device, const struct cudaDeviceProp *prop); +/** + * \brief Initialize device to be used for GPU executions + * + * This function will initialize the CUDA Runtime structures and primary context on \p device when called, + * but the context will not be made current to \p device. + * + * When ::cudaInitDeviceFlagsAreValid is set in \p flags, deviceFlags are applied to the requested device. + * The values of deviceFlags match those of the flags parameters in ::cudaSetDeviceFlags. + * The effect may be verified by ::cudaGetDeviceFlags. + * + * This function will return an error if the device is in ::cudaComputeModeExclusiveProcess + * and is occupied by another process or if the device is in ::cudaComputeModeProhibited. 
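+ *
+ * A minimal sketch of eager runtime initialization, assuming device 0 exists
+ * (error checking omitted):
+ * \code
+   // Initialize the runtime and primary context on device 0 up front,
+   // without making it the current device for this thread.
+   cudaInitDevice(0, cudaDeviceScheduleBlockingSync, cudaInitDeviceFlagsAreValid);
+
+   // Later, selecting the device is cheap because it is already initialized.
+   cudaSetDevice(0);
+ * \endcode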
+ * + * \param device - Device on which the runtime will initialize itself. + * \param deviceFlags - Parameters for device operation. + * \param flags - Flags for controlling the device initialization. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, ::cudaSetDevice + * ::cuCtxSetCurrent + */ +extern __host__ cudaError_t CUDARTAPI cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags); +/** + * \brief Set device to be used for GPU executions + * + * Sets \p device as the current device for the calling host thread. + * Valid device id's are 0 to (::cudaGetDeviceCount() - 1). + * + * Any device memory subsequently allocated from this host thread + * using ::cudaMalloc(), ::cudaMallocPitch() or ::cudaMallocArray() + * will be physically resident on \p device. Any host memory allocated + * from this host thread using ::cudaMallocHost() or ::cudaHostAlloc() + * or ::cudaHostRegister() will have its lifetime associated with + * \p device. Any streams or events created from this host thread will + * be associated with \p device. Any kernels launched from this host + * thread using the <<<>>> operator or ::cudaLaunchKernel() will be executed + * on \p device. + * + * This call may be made from any host thread, to any device, and at + * any time. This function will do no synchronization with the previous + * or new device, + * and should only take significant time when it initializes the runtime's context state. + * This call will bind the primary context of the specified device to the calling thread and all the + * subsequent memory allocations, stream and event creations, and kernel launches + * will be associated with the primary context. + * This function will also immediately initialize the runtime state on the primary context, + * and the context will be current on \p device immediately. This function will return an + * error if the device is in ::cudaComputeModeExclusiveProcess and is occupied by another + * process or if the device is in ::cudaComputeModeProhibited. + * + * It is not required to call ::cudaInitDevice before using this function. + * \param device - Device on which the active host thread should execute the + * device code. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorDeviceUnavailable, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, + * ::cudaInitDevice, + * ::cuCtxSetCurrent + */ +extern __host__ cudaError_t CUDARTAPI cudaSetDevice(int device); + +/** + * \brief Returns which device is currently being used + * + * Returns in \p *device the current device for the calling host thread. + * + * \param device - Returns the device on which the active host thread + * executes the device code. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorDeviceUnavailable, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaSetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, + * ::cuCtxGetCurrent + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device); + +/** + * \brief Set a list of devices that can be used for CUDA + * + * Sets a list of devices for CUDA execution in priority order using + * \p device_arr. 
The parameter \p len specifies the number of elements in the + * list. CUDA will try devices from the list sequentially until it finds one + * that works. If this function is not called, or if it is called with a \p len + * of 0, then CUDA will go back to its default behavior of trying devices + * sequentially from a default list containing all of the available CUDA + * devices in the system. If a specified device ID in the list does not exist, + * this function will return ::cudaErrorInvalidDevice. If \p len is not 0 and + * \p device_arr is NULL or if \p len exceeds the number of devices in + * the system, then ::cudaErrorInvalidValue is returned. + * + * \param device_arr - List of devices to try + * \param len - Number of devices in specified list + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaSetDevice, ::cudaGetDeviceProperties, + * ::cudaSetDeviceFlags, + * ::cudaChooseDevice + */ +extern __host__ cudaError_t CUDARTAPI cudaSetValidDevices(int *device_arr, int len); + +/** + * \brief Sets flags to be used for device executions + * + * Records \p flags as the flags for the current device. If the current device + * has been set and that device has already been initialized, the previous flags + * are overwritten. If the current device has not been initialized, it is + * initialized with the provided flags. If no device has been made current to + * the calling thread, a default device is selected and initialized with the + * provided flags. + * + * The two LSBs of the \p flags parameter can be used to control how the CPU + * thread interacts with the OS scheduler when waiting for results from the + * device. + * + * - ::cudaDeviceScheduleAuto: The default value if the \p flags parameter is + * zero, uses a heuristic based on the number of active CUDA contexts in the + * process \p C and the number of logical processors in the system \p P. If + * \p C \> \p P, then CUDA will yield to other OS threads when waiting for the + * device, otherwise CUDA will not yield while waiting for results and + * actively spin on the processor. Additionally, on Tegra devices, + * ::cudaDeviceScheduleAuto uses a heuristic based on the power profile of + * the platform and may choose ::cudaDeviceScheduleBlockingSync for low-powered + * devices. + * - ::cudaDeviceScheduleSpin: Instruct CUDA to actively spin when waiting for + * results from the device. This can decrease latency when waiting for the + * device, but may lower the performance of CPU threads if they are performing + * work in parallel with the CUDA thread. + * - ::cudaDeviceScheduleYield: Instruct CUDA to yield its thread when waiting + * for results from the device. This can increase latency when waiting for the + * device, but can increase the performance of CPU threads performing work in + * parallel with the device. + * - ::cudaDeviceScheduleBlockingSync: Instruct CUDA to block the CPU thread + * on a synchronization primitive when waiting for the device to finish work. + * - ::cudaDeviceBlockingSync: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the device to finish work.
+ * \ref deprecated "Deprecated:" This flag was deprecated as of CUDA 4.0 and + * replaced with ::cudaDeviceScheduleBlockingSync. + * - ::cudaDeviceMapHost: This flag enables allocating pinned + * host memory that is accessible to the device. It is implicit for the + * runtime but may be absent if a context is created using the driver API. + * If this flag is not set, ::cudaHostGetDevicePointer() will always return + * a failure code. + * - ::cudaDeviceLmemResizeToMax: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * \ref deprecated "Deprecated:" This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * + * \param flags - Parameters for device operation + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceFlags, ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaSetDevice, ::cudaSetValidDevices, + * ::cudaInitDevice, + * ::cudaChooseDevice, + * ::cuDevicePrimaryCtxSetFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaSetDeviceFlags( unsigned int flags ); + +/** + * \brief Gets the flags for the current device + * + * + * Returns in \p flags the flags for the current device. If there is a current + * device for the calling thread, the flags for the device are returned. If + * there is no current device, the flags for the first device are returned, + * which may be the default flags. Compare to the behavior of + * ::cudaSetDeviceFlags. + * + * Typically, the flags returned should match the behavior that will be seen + * if the calling thread uses a device after this call, without any change to + * the flags or current device inbetween by this or another thread. Note that + * if the device is not initialized, it is possible for another thread to + * change the flags for the current device before it is initialized. + * Additionally, when using exclusive mode, if this thread has not requested a + * specific device, it may use a device other than the first device, contrary + * to the assumption made by this function. + * + * If a context has been created via the driver API and is current to the + * calling thread, the flags for that context are always returned. + * + * Flags returned by this function may specifically include ::cudaDeviceMapHost + * even though it is not accepted by ::cudaSetDeviceFlags because it is + * implicit in runtime API flags. The reason for this is that the current + * context may have been created via the driver API in which case the flag is + * not implicit and may be unset. + * + * \param flags - Pointer to store the device flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaSetDevice, ::cudaSetDeviceFlags, + * ::cudaInitDevice, + * ::cuCtxGetFlags, + * ::cuDevicePrimaryCtxGetState + */ +extern __host__ cudaError_t CUDARTAPI cudaGetDeviceFlags( unsigned int *flags ); +/** @} */ /* END CUDART_DEVICE */ + +/** + * \defgroup CUDART_STREAM Stream Management + * + * ___MANBRIEF___ stream management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Create an asynchronous stream + * + * Creates a new asynchronous stream. 
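+ *
+ * A minimal sketch of creating, using, and destroying a stream (\c myKernel,
+ * \c grid, \c block and \c args are placeholders; error checking omitted):
+ * \code
+   cudaStream_t stream;
+   cudaStreamCreate(&stream);
+
+   myKernel<<<grid, block, 0, stream>>>(args);   // work issued into 'stream'
+
+   cudaStreamSynchronize(stream);                // wait for the stream's work
+   cudaStreamDestroy(stream);
+ * \endcode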
+ * + * \param pStream - Pointer to new stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamGetPriority, + * ::cudaStreamGetFlags, + * ::cudaStreamQuery, + * ::cudaStreamSynchronize, + * ::cudaStreamWaitEvent, + * ::cudaStreamAddCallback, + * ::cudaStreamDestroy, + * ::cuStreamCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamCreate(cudaStream_t *pStream); + +/** + * \brief Create an asynchronous stream + * + * Creates a new asynchronous stream. The \p flags argument determines the + * behaviors of the stream. Valid values for \p flags are + * - ::cudaStreamDefault: Default stream creation flag. + * - ::cudaStreamNonBlocking: Specifies that work running in the created + * stream may run concurrently with work in stream 0 (the NULL stream), and that + * the created stream should perform no implicit synchronization with stream 0. + * + * \param pStream - Pointer to new stream identifier + * \param flags - Parameters for stream creation + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, + * ::cudaStreamCreateWithPriority, + * ::cudaStreamGetFlags, + * ::cudaStreamQuery, + * ::cudaStreamSynchronize, + * ::cudaStreamWaitEvent, + * ::cudaStreamAddCallback, + * ::cudaStreamDestroy, + * ::cuStreamCreate + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags); + +/** + * \brief Create an asynchronous stream with the specified priority + * + * Creates a stream with the specified priority and returns a handle in \p pStream. + * This API alters the scheduler priority of work in the stream. Work in a higher + * priority stream may preempt work already executing in a low priority stream. + * + * \p priority follows a convention where lower numbers represent higher priorities. + * '0' represents default priority. The range of meaningful numerical priorities can + * be queried using ::cudaDeviceGetStreamPriorityRange. If the specified priority is + * outside the numerical range returned by ::cudaDeviceGetStreamPriorityRange, + * it will automatically be clamped to the lowest or the highest number in the range. + * + * \param pStream - Pointer to new stream identifier + * \param flags - Flags for stream creation. See ::cudaStreamCreateWithFlags for a list of valid flags that can be passed + * \param priority - Priority of the stream. Lower numbers represent higher priorities. + * See ::cudaDeviceGetStreamPriorityRange for more information about + * the meaningful stream priorities that can be passed. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \note Stream priorities are supported only on GPUs + * with compute capability 3.5 or higher. + * + * \note In the current implementation, only compute kernels launched in + * priority streams are affected by the stream's priority. Stream priorities have + * no effect on host-to-device and device-to-host memory operations. 
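+ *
+ * A minimal sketch, assuming the device supports stream priorities (error
+ * checking omitted):
+ * \code
+   int leastPriority, greatestPriority;
+   cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority);
+
+   // Lower numbers are higher priority; create one stream at each extreme.
+   cudaStream_t highPrio, lowPrio;
+   cudaStreamCreateWithPriority(&highPrio, cudaStreamNonBlocking, greatestPriority);
+   cudaStreamCreateWithPriority(&lowPrio,  cudaStreamNonBlocking, leastPriority);
+ * \endcode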
+ * + * \sa ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags, + * ::cudaDeviceGetStreamPriorityRange, + * ::cudaStreamGetPriority, + * ::cudaStreamQuery, + * ::cudaStreamWaitEvent, + * ::cudaStreamAddCallback, + * ::cudaStreamSynchronize, + * ::cudaStreamDestroy, + * ::cuStreamCreateWithPriority + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithPriority(cudaStream_t *pStream, unsigned int flags, int priority); + +/** + * \brief Query the priority of a stream + * + * Query the priority of a stream. The priority is returned in in \p priority. + * Note that if the stream was created with a priority outside the meaningful + * numerical range returned by ::cudaDeviceGetStreamPriorityRange, + * this function returns the clamped priority. + * See ::cudaStreamCreateWithPriority for details about priority clamping. + * + * \param hStream - Handle to the stream to be queried + * \param priority - Pointer to a signed integer in which the stream's priority is returned + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaDeviceGetStreamPriorityRange, + * ::cudaStreamGetFlags, + * ::cuStreamGetPriority + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetPriority(cudaStream_t hStream, int *priority); + +/** + * \brief Query the flags of a stream + * + * Query the flags of a stream. The flags are returned in \p flags. + * See ::cudaStreamCreateWithFlags for a list of valid flags. + * + * \param hStream - Handle to the stream to be queried + * \param flags - Pointer to an unsigned integer in which the stream's flags are returned + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamGetPriority, + * ::cuStreamGetFlags + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetFlags(cudaStream_t hStream, unsigned int *flags); + +/** + * \brief Query the Id of a stream + * + * Query the Id of a stream. The Id is returned in \p streamId. + * The Id is unique for the life of the program. + * + * The stream handle \p hStream can refer to any of the following: + *
+ * - a stream created via any of the CUDA runtime APIs such as ::cudaStreamCreate,
+ *   ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority, or their driver
+ *   API equivalents such as ::cuStreamCreate or ::cuStreamCreateWithPriority.
+ *   Passing an invalid handle will result in undefined behavior.
+ * - any of the special streams such as the NULL stream, ::cudaStreamLegacy
+ *   and ::cudaStreamPerThread respectively. The driver API equivalents of these
+ *   are also accepted which are NULL, ::CU_STREAM_LEGACY and ::CU_STREAM_PER_THREAD.
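+ *
+ * A minimal sketch (error checking omitted):
+ * \code
+   cudaStream_t stream;
+   cudaStreamCreate(&stream);
+
+   unsigned long long id = 0;
+   cudaStreamGetId(stream, &id);   // 'id' is unique for the life of the program
+ * \endcode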
+ * + * \param hStream - Handle to the stream to be queried + * \param streamId - Pointer to an unsigned long long in which the stream Id is returned + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamGetPriority, + * ::cudaStreamGetFlags, + * ::cuStreamGetId + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetId(cudaStream_t hStream, unsigned long long *streamId); + +/** + * \brief Resets all persisting lines in cache to normal status. + * + * Resets all persisting lines in cache to normal status. + * Takes effect on function return. + * + * \return + * ::cudaSuccess, + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaCtxResetPersistingL2Cache(void); + +/** + * \brief Copies attributes from source stream to destination stream. + * + * Copies attributes from source stream \p src to destination stream \p dst. + * Both streams must have the same context. + * + * \param[out] dst Destination stream + * \param[in] src Source stream + * For attributes see ::cudaStreamAttrID + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotSupported + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCopyAttributes(cudaStream_t dst, cudaStream_t src); + + /** + * \brief Queries stream attribute. + * + * Queries attribute \p attr from \p hStream and stores it in corresponding + * member of \p value_out. + * + * \param[in] hStream + * \param[in] attr + * \param[out] value_out + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetAttribute( + cudaStream_t hStream, cudaStreamAttrID attr, + cudaStreamAttrValue *value_out); + + /** + * \brief Sets stream attribute. + * + * Sets attribute \p attr on \p hStream from corresponding attribute of + * \p value. The updated attribute will be applied to subsequent work + * submitted to the stream. It will not affect previously submitted work. + * + * \param[out] hStream + * \param[in] attr + * \param[in] value + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamSetAttribute( + cudaStream_t hStream, cudaStreamAttrID attr, + const cudaStreamAttrValue *value); + + /** + * \brief Destroys and cleans up an asynchronous stream + * + * Destroys and cleans up the asynchronous stream specified by \p stream. + * + * In case the device is still doing work in the stream \p stream + * when ::cudaStreamDestroy() is called, the function will return immediately + * and the resources associated with \p stream will be released automatically + * once the device has completed all work in \p stream. 
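+ *
+ * A minimal sketch of the deferred-cleanup behavior described above (\c myKernel,
+ * \c grid, \c block and \c args are placeholders; error checking omitted):
+ * \code
+   cudaStream_t stream;
+   cudaStreamCreate(&stream);
+   myKernel<<<grid, block, 0, stream>>>(args);
+
+   // Returns immediately even though the kernel may still be running;
+   // the stream's resources are released once its work completes.
+   cudaStreamDestroy(stream);
+ * \endcode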
+ * + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamQuery, + * ::cudaStreamWaitEvent, + * ::cudaStreamSynchronize, + * ::cudaStreamAddCallback, + * ::cuStreamDestroy + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream); + +/** + * \brief Make a compute stream wait on an event + * + * Makes all future work submitted to \p stream wait for all work captured in + * \p event. See ::cudaEventRecord() for details on what is captured by an event. + * The synchronization will be performed efficiently on the device when applicable. + * \p event may be from a different device than \p stream. + * + * flags include: + * - ::cudaEventWaitDefault: Default event creation flag. + * - ::cudaEventWaitExternal: Event is captured in the graph as an external + * event node when performing stream capture. + * + * \param stream - Stream to wait + * \param event - Event to wait on + * \param flags - Parameters for the operation(See above) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, + * ::cuStreamWaitEvent + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags __dv(0)); + +/** + * Type of stream callback functions. + * \param stream The stream as passed to ::cudaStreamAddCallback, may be NULL. + * \param status ::cudaSuccess or any persistent error on the stream. + * \param userData User parameter provided at registration. + */ +typedef void (CUDART_CB *cudaStreamCallback_t)(cudaStream_t stream, cudaError_t status, void *userData); + +/** + * \brief Add a callback to a compute stream + * + * \note This function is slated for eventual deprecation and removal. If + * you do not require the callback to execute in case of a device error, + * consider using ::cudaLaunchHostFunc. Additionally, this function is not + * supported with ::cudaStreamBeginCapture and ::cudaStreamEndCapture, unlike + * ::cudaLaunchHostFunc. + * + * Adds a callback to be called on the host after all currently enqueued + * items in the stream have completed. For each + * cudaStreamAddCallback call, a callback will be executed exactly once. + * The callback will block later work in the stream until it is finished. + * + * The callback may be passed ::cudaSuccess or an error code. In the event + * of a device error, all subsequently executed callbacks will receive an + * appropriate ::cudaError_t. + * + * Callbacks must not make any CUDA API calls. Attempting to use CUDA APIs + * may result in ::cudaErrorNotPermitted. Callbacks must not perform any + * synchronization that may depend on outstanding device work or other callbacks + * that are not mandated to run earlier. Callbacks without a mandated order + * (in independent streams) execute in undefined order and may be serialized. + * + * For the purposes of Unified Memory, callback execution makes a number of + * guarantees: + *
+ * - The callback stream is considered idle for the duration of the
+ *   callback. Thus, for example, a callback may always use memory attached
+ *   to the callback stream.
+ * - The start of execution of a callback has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the callback. It thus synchronizes streams which have been "joined"
+ *   prior to the callback.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding callbacks have executed. Thus, for
+ *   example, a callback might use global attached memory even if work has
+ *   been added to another stream, if it has been properly ordered with an
+ *   event.
+ * - Completion of a callback does not cause a stream to become
+ *   active except as described above. The callback stream will remain idle
+ *   if no device work follows the callback, and will remain idle across
+ *   consecutive callbacks without device work in between. Thus, for example,
+ *   stream synchronization can be done by signaling from a callback at the
+ *   end of the stream.
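+ *
+ * A minimal sketch (\c myKernel, \c grid, \c block, \c args and \c stream are
+ * placeholders; the callback body is illustrative and error checking is omitted):
+ * \code
+   void CUDART_CB myCallback(cudaStream_t stream, cudaError_t status, void *userData)
+   {
+       // Host-only work; 'status' is ::cudaSuccess or the first error on the stream.
+       // No CUDA API calls are allowed here.
+   }
+
+   ...
+   myKernel<<<grid, block, 0, stream>>>(args);
+   cudaStreamAddCallback(stream, myCallback, NULL, 0);
+ * \endcode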
+ * + * \param stream - Stream to add callback to + * \param callback - The function to call once preceding stream operations are complete + * \param userData - User specified data to be passed to the callback function + * \param flags - Reserved for future use, must be 0 + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamSynchronize, ::cudaStreamWaitEvent, ::cudaStreamDestroy, ::cudaMallocManaged, ::cudaStreamAttachMemAsync, + * ::cudaLaunchHostFunc, ::cuStreamAddCallback + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamAddCallback(cudaStream_t stream, + cudaStreamCallback_t callback, void *userData, unsigned int flags); + +/** + * \brief Waits for stream tasks to complete + * + * Blocks until \p stream has completed all operations. If the + * ::cudaDeviceScheduleBlockingSync flag was set for this device, + * the host thread will block until the stream is finished with + * all of its tasks. + * + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamWaitEvent, ::cudaStreamAddCallback, ::cudaStreamDestroy, + * ::cuStreamSynchronize + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamSynchronize(cudaStream_t stream); + +/** + * \brief Queries an asynchronous stream for completion status + * + * Returns ::cudaSuccess if all operations in \p stream have + * completed, or ::cudaErrorNotReady if not. + * + * For the purposes of Unified Memory, a return value of ::cudaSuccess + * is equivalent to having called ::cudaStreamSynchronize(). + * + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, + * ::cuStreamQuery + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamQuery(cudaStream_t stream); + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p stream to specify stream association of + * \p length bytes of memory starting from \p devPtr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p devPtr must point to an one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cudaMallocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::cudaDevAttrPageableMemoryAccess. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. 
+ * + * For pageable allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle. + * The default value for \p flags is ::cudaMemAttachSingle + * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with + * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess, + * the program makes a guarantee that it will only access the memory on the device + * from \p stream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p stream + * have completed, regardless of whether other streams are active. In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. + * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p stream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cudaMallocManaged. For __managed__ variables, the default + * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. 
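+ *
+ * A minimal sketch of single-stream attachment for managed memory (\c myKernel,
+ * \c grid and \c block are placeholders; error checking omitted):
+ * \code
+   float *data;
+   cudaMallocManaged(&data, 1024 * sizeof(float));
+
+   cudaStream_t stream;
+   cudaStreamCreate(&stream);
+
+   // Associate the whole allocation with 'stream' (length 0 selects the entire allocation).
+   cudaStreamAttachMemAsync(stream, data, 0, cudaMemAttachSingle);
+   cudaStreamSynchronize(stream);
+
+   myKernel<<<grid, block, 0, stream>>>(data);
+   cudaStreamSynchronize(stream);   // after this, the CPU may safely access 'data'
+ * \endcode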
+ * + * \param stream - Stream in which to enqueue the attach operation + * \param devPtr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * memory) + * \param length - Length of memory (defaults to zero) + * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle) + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged, + * ::cuStreamAttachMemAsync + */ +#if defined(__cplusplus) +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamAttachMemAsync(cudaStream_t stream, void *devPtr, size_t length __dv(0), unsigned int flags = cudaMemAttachSingle); +#else +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamAttachMemAsync(cudaStream_t stream, void *devPtr, size_t length __dv(0), unsigned int flags); +#endif + +/** + * \brief Begins graph capture on a stream + * + * Begin graph capture on \p stream. When a stream is in capture mode, all operations + * pushed into the stream will not be executed, but will instead be captured into + * a graph, which will be returned via ::cudaStreamEndCapture. Capture may not be initiated + * if \p stream is ::cudaStreamLegacy. Capture must be ended on the same stream in which + * it was initiated, and it may only be initiated if the stream is not already in capture + * mode. The capture mode may be queried via ::cudaStreamIsCapturing. A unique id + * representing the capture sequence may be queried via ::cudaStreamGetCaptureInfo. + * + * If \p mode is not ::cudaStreamCaptureModeRelaxed, ::cudaStreamEndCapture must be + * called on this stream from the same thread. + * + * \note Kernels captured using this API must not use texture and surface references. + * Reading or writing through any texture or surface reference is undefined + * behavior. This restriction does not apply to texture and surface objects. + * + * \param stream - Stream in which to initiate capture + * \param mode - Controls the interaction of this capture sequence with other API + * calls that are potentially unsafe. For more details see + * ::cudaThreadExchangeStreamCaptureMode. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * + * \sa + * ::cudaStreamCreate, + * ::cudaStreamIsCapturing, + * ::cudaStreamEndCapture, + * ::cudaThreadExchangeStreamCaptureMode + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamBeginCapture(cudaStream_t stream, enum cudaStreamCaptureMode mode); + +/** + * \brief Swaps the stream capture interaction mode for a thread + * + * Sets the calling thread's stream capture interaction mode to the value contained + * in \p *mode, and overwrites \p *mode with the previous mode for the thread. To + * facilitate deterministic behavior across function or module boundaries, callers + * are encouraged to use this API in a push-pop fashion: \code + cudaStreamCaptureMode mode = desiredMode; + cudaThreadExchangeStreamCaptureMode(&mode); + ... + cudaThreadExchangeStreamCaptureMode(&mode); // restore previous mode + * \endcode + * + * During stream capture (see ::cudaStreamBeginCapture), some actions, such as a call + * to ::cudaMalloc, may be unsafe. 
In the case of ::cudaMalloc, the operation is + * not enqueued asynchronously to a stream, and is not observed by stream capture. + * Therefore, if the sequence of operations captured via ::cudaStreamBeginCapture + * depended on the allocation being replayed whenever the graph is launched, the + * captured graph would be invalid. + * + * Therefore, stream capture places restrictions on API calls that can be made within + * or concurrently to a ::cudaStreamBeginCapture-::cudaStreamEndCapture sequence. This + * behavior can be controlled via this API and flags to ::cudaStreamBeginCapture. + * + * A thread's mode is one of the following: + * - \p cudaStreamCaptureModeGlobal: This is the default mode. If the local thread has + * an ongoing capture sequence that was not initiated with + * \p cudaStreamCaptureModeRelaxed at \p cuStreamBeginCapture, or if any other thread + * has a concurrent capture sequence initiated with \p cudaStreamCaptureModeGlobal, + * this thread is prohibited from potentially unsafe API calls. + * - \p cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture + * sequence not initiated with \p cudaStreamCaptureModeRelaxed, it is prohibited + * from potentially unsafe API calls. Concurrent capture sequences in other threads + * are ignored. + * - \p cudaStreamCaptureModeRelaxed: The local thread is not prohibited from potentially + * unsafe API calls. Note that the thread is still prohibited from API calls which + * necessarily conflict with stream capture, for example, attempting ::cudaEventQuery + * on an event that was last recorded inside a capture sequence. + * + * \param mode - Pointer to mode value to swap with the current mode + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * + * \sa + * ::cudaStreamBeginCapture + */ +extern __host__ cudaError_t CUDARTAPI cudaThreadExchangeStreamCaptureMode(enum cudaStreamCaptureMode *mode); + +/** + * \brief Ends capture on a stream, returning the captured graph + * + * End capture on \p stream, returning the captured graph via \p pGraph. + * Capture must have been initiated on \p stream via a call to ::cudaStreamBeginCapture. + * If capture was invalidated, due to a violation of the rules of stream capture, then + * a NULL graph will be returned. + * + * If the \p mode argument to ::cudaStreamBeginCapture was not + * ::cudaStreamCaptureModeRelaxed, this call must be from the same thread as + * ::cudaStreamBeginCapture. + * + * \param stream - Stream to query + * \param pGraph - The captured graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorStreamCaptureWrongThread + * \notefnerr + * + * \sa + * ::cudaStreamCreate, + * ::cudaStreamBeginCapture, + * ::cudaStreamIsCapturing + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamEndCapture(cudaStream_t stream, cudaGraph_t *pGraph); + +/** + * \brief Returns a stream's capture status + * + * Return the capture status of \p stream via \p pCaptureStatus. After a successful + * call, \p *pCaptureStatus will contain one of the following: + * - ::cudaStreamCaptureStatusNone: The stream is not capturing. + * - ::cudaStreamCaptureStatusActive: The stream is capturing. + * - ::cudaStreamCaptureStatusInvalidated: The stream was capturing but an error + * has invalidated the capture sequence. The capture sequence must be terminated + * with ::cudaStreamEndCapture on the stream where it was initiated in order to + * continue using \p stream. 
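+ *
+ * As a rough usage sketch (the kernel \p k and its launch configuration are
+ * placeholders, and \p stream is assumed to be a user-created stream):
+ * \code
+ cudaGraph_t graph;
+ cudaStreamCaptureStatus status;
+ cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
+ k<<<blocks, threads, 0, stream>>>();         // captured into the graph, not executed
+ cudaStreamIsCapturing(stream, &status);      // status == cudaStreamCaptureStatusActive
+ cudaStreamEndCapture(stream, &graph);        // capture ends, graph is returned
+ * \endcode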
+ * + * Note that, if this is called on ::cudaStreamLegacy (the "null stream") while + * a blocking stream on the same device is capturing, it will return + * ::cudaErrorStreamCaptureImplicit and \p *pCaptureStatus is unspecified + * after the call. The blocking stream capture is not invalidated. + * + * When a blocking stream is capturing, the legacy stream is in an + * unusable state until the blocking stream capture is terminated. The legacy + * stream is not supported for stream capture, but attempted use would have an + * implicit dependency on the capturing stream(s). + * + * \param stream - Stream to query + * \param pCaptureStatus - Returns the stream's capture status + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorStreamCaptureImplicit + * \notefnerr + * + * \sa + * ::cudaStreamCreate, + * ::cudaStreamBeginCapture, + * ::cudaStreamEndCapture + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamIsCapturing(cudaStream_t stream, enum cudaStreamCaptureStatus *pCaptureStatus); + +/** + * \brief Query a stream's capture state + * + * Query stream state related to stream capture. + * + * If called on ::cudaStreamLegacy (the "null stream") while a stream not created + * with ::cudaStreamNonBlocking is capturing, returns ::cudaErrorStreamCaptureImplicit. + * + * Valid data (other than capture status) is returned only if both of the following are true: + * - the call returns cudaSuccess + * - the returned capture status is ::cudaStreamCaptureStatusActive + * + * \param stream - The stream to query + * \param captureStatus_out - Location to return the capture status of the stream; required + * \param id_out - Optional location to return an id for the capture sequence, which is + * unique over the lifetime of the process + * \param graph_out - Optional location to return the graph being captured into. All + * operations other than destroy and node removal are permitted on the graph + * while the capture sequence is in progress. This API does not transfer + * ownership of the graph, which is transferred or destroyed at + * ::cudaStreamEndCapture. Note that the graph handle may be invalidated before + * end of capture for certain errors. Nodes that are or become + * unreachable from the original stream at ::cudaStreamEndCapture due to direct + * actions on the graph do not trigger ::cudaErrorStreamCaptureUnjoined. + * \param dependencies_out - Optional location to store a pointer to an array of nodes. + * The next node to be captured in the stream will depend on this set of nodes, + * absent operations such as event wait which modify this set. The array pointer + * is valid until the next API call which operates on the stream or until end of + * capture. The node handles may be copied out and are valid until they or the + * graph is destroyed. The driver-owned array may also be passed directly to + * APIs that operate on the graph (not the stream) without copying. + * \param numDependencies_out - Optional location to store the size of the array + * returned in dependencies_out. 
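+ *
+ * As a non-authoritative sketch of a full query during an active capture on a
+ * user-created \p stream:
+ * \code
+ cudaStreamCaptureStatus status;
+ unsigned long long id;
+ cudaGraph_t graph;
+ const cudaGraphNode_t *deps;
+ size_t numDeps;
+ cudaStreamGetCaptureInfo(stream, &status, &id, &graph, &deps, &numDeps);
+ if (status == cudaStreamCaptureStatusActive) {
+     // id, graph, deps and numDeps describe the capture in progress
+ }
+ * \endcode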
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorStreamCaptureImplicit + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cudaStreamBeginCapture, + * ::cudaStreamIsCapturing, + * ::cudaStreamUpdateCaptureDependencies + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out __dv(0), cudaGraph_t *graph_out __dv(0), const cudaGraphNode_t **dependencies_out __dv(0), size_t *numDependencies_out __dv(0)); + +/** + * \brief Update the set of dependencies in a capturing stream (11.3+) + * + * Modifies the dependency set of a capturing stream. The dependency set is the set + * of nodes that the next captured node in the stream will depend on. + * + * Valid flags are ::cudaStreamAddCaptureDependencies and + * ::cudaStreamSetCaptureDependencies. These control whether the set passed to + * the API is added to the existing set or replaces it. A flags value of 0 defaults + * to ::cudaStreamAddCaptureDependencies. + * + * Nodes that are removed from the dependency set via this API do not result in + * ::cudaErrorStreamCaptureUnjoined if they are unreachable from the stream at + * ::cudaStreamEndCapture. + * + * Returns ::cudaErrorIllegalState if the stream is not capturing. + * + * This API is new in CUDA 11.3. Developers requiring compatibility across minor + * versions of the CUDA driver to 11.0 should not use this API or provide a fallback. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorIllegalState + * \notefnerr + * + * \sa + * ::cudaStreamBeginCapture, + * ::cudaStreamGetCaptureInfo, + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamUpdateCaptureDependencies(cudaStream_t stream, cudaGraphNode_t *dependencies, size_t numDependencies, unsigned int flags __dv(0)); +/** @} */ /* END CUDART_STREAM */ + +/** + * \defgroup CUDART_EVENT Event Management + * + * ___MANBRIEF___ event management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the event management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Creates an event object + * + * Creates an event object for the current device using ::cudaEventDefault. + * + * \param event - Newly created event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*, unsigned int) "cudaEventCreate (C++ API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cuEventCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaEventCreate(cudaEvent_t *event); + +/** + * \brief Creates an event object with the specified flags + * + * Creates an event object for the current device with the specified flags. Valid + * flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that event should use blocking + * synchronization. A host thread that uses ::cudaEventSynchronize() to wait + * on an event created with this flag will block until the event actually + * completes. + * - ::cudaEventDisableTiming: Specifies that the created event does not need + * to record timing data. 
Events created with this flag specified and + * the ::cudaEventBlockingSync flag not specified will provide the best + * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery(). + * - ::cudaEventInterprocess: Specifies that the created event may be used as an + * interprocess event by ::cudaIpcGetEventHandle(). ::cudaEventInterprocess must + * be specified along with ::cudaEventDisableTiming. + * + * \param event - Newly created event + * \param flags - Flags for new event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cuEventCreate + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags); + +/** + * \brief Records an event + * + * Captures in \p event the contents of \p stream at the time of this call. + * \p event and \p stream must be on the same CUDA context. + * Calls such as ::cudaEventQuery() or ::cudaStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p stream after this call do not modify \p event. See note on default + * stream behavior for what is captured in the default case. + * + * ::cudaEventRecord() can be called multiple times on the same event and + * will overwrite the previously captured state. Other APIs such as + * ::cudaStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cudaEventRecord(). Before the first call to ::cudaEventRecord(), an + * event represents an empty set of work, so for example ::cudaEventQuery() + * would return ::cudaSuccess. + * + * \param event - Event to record + * \param stream - Stream in which to record event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cudaEventRecordWithFlags, + * ::cuEventRecord + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream __dv(0)); + +/** + * \brief Records an event + * + * Captures in \p event the contents of \p stream at the time of this call. + * \p event and \p stream must be on the same CUDA context. + * Calls such as ::cudaEventQuery() or ::cudaStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p stream after this call do not modify \p event. See note on default + * stream behavior for what is captured in the default case. + * + * ::cudaEventRecordWithFlags() can be called multiple times on the same event and + * will overwrite the previously captured state. Other APIs such as + * ::cudaStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cudaEventRecordWithFlags(). 
Before the first call to ::cudaEventRecordWithFlags(), an + * event represents an empty set of work, so for example ::cudaEventQuery() + * would return ::cudaSuccess. + * + * flags include: + * - ::cudaEventRecordDefault: Default event creation flag. + * - ::cudaEventRecordExternal: Event is captured in the graph as an external + * event node when performing stream capture. + * + * \param event - Event to record + * \param stream - Stream in which to record event + * \param flags - Parameters for the operation(See above) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cudaEventRecord, + * ::cuEventRecord, + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream __dv(0), unsigned int flags __dv(0)); +#endif + +/** + * \brief Queries an event's status + * + * Queries the status of all work currently captured by \p event. See + * ::cudaEventRecord() for details on what is captured by an event. + * + * Returns ::cudaSuccess if all captured work has been completed, or + * ::cudaErrorNotReady if any captured work is incomplete. + * + * For the purposes of Unified Memory, a return value of ::cudaSuccess + * is equivalent to having called ::cudaEventSynchronize(). + * + * \param event - Event to query + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cuEventQuery + */ +extern __host__ cudaError_t CUDARTAPI cudaEventQuery(cudaEvent_t event); + +/** + * \brief Waits for an event to complete + * + * Waits until the completion of all work currently captured in \p event. + * See ::cudaEventRecord() for details on what is captured by an event. + * + * Waiting for an event that was created with the ::cudaEventBlockingSync + * flag will cause the calling CPU thread to block until the event has + * been completed by the device. If the ::cudaEventBlockingSync flag has + * not been set, then the CPU thread will busy-wait until the event has + * been completed by the device. + * + * \param event - Event to wait for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, + * ::cudaEventQuery, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cuEventSynchronize + */ +extern __host__ cudaError_t CUDARTAPI cudaEventSynchronize(cudaEvent_t event); + +/** + * \brief Destroys an event object + * + * Destroys the event specified by \p event. + * + * An event may be destroyed before it is complete (i.e., while + * ::cudaEventQuery() would return ::cudaErrorNotReady). 
In this case, the + * call does not block on completion of the event, and any associated + * resources will automatically be released asynchronously at completion. + * + * \param event - Event to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventRecord, ::cudaEventElapsedTime, + * ::cuEventDestroy + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event); + +/** + * \brief Computes the elapsed time between events + * + * Computes the elapsed time between two events (in milliseconds with a + * resolution of around 0.5 microseconds). + * + * If either event was last recorded in a non-NULL stream, the resulting time + * may be greater than expected (even if both used the same stream handle). This + * happens because the ::cudaEventRecord() operation takes place asynchronously + * and there is no guarantee that the measured latency is actually just between + * the two events. Any number of other different stream operations could execute + * in between the two measured events, thus altering the timing in a significant + * way. + * + * If ::cudaEventRecord() has not been called on either event, then + * ::cudaErrorInvalidResourceHandle is returned. If ::cudaEventRecord() has been + * called on both events but one or both of them has not yet been completed + * (that is, ::cudaEventQuery() would return ::cudaErrorNotReady on at least one + * of the events), ::cudaErrorNotReady is returned. If either event was created + * with the ::cudaEventDisableTiming flag, then this function will return + * ::cudaErrorInvalidResourceHandle. + * + * \param ms - Time between \p start and \p end in ms + * \param start - Starting event + * \param end - Ending event + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventRecord, + * ::cuEventElapsedTime + */ +extern __host__ cudaError_t CUDARTAPI cudaEventElapsedTime(float *ms, cudaEvent_t start, cudaEvent_t end); + +/** @} */ /* END CUDART_EVENT */ + +/** + * \defgroup CUDART_EXTRES_INTEROP External Resource Interoperability + * + * ___MANBRIEF___ External resource interoperability functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the external resource interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Imports an external memory object + * + * Imports an externally allocated memory object and returns + * a handle to that in \p extMem_out. + * + * The properties of the handle being imported must be described in + * \p memHandleDesc. 
The ::cudaExternalMemoryHandleDesc structure + * is defined as follows: + * + * \code + typedef struct cudaExternalMemoryHandleDesc_st { + cudaExternalMemoryHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void *nvSciBufObject; + } handle; + unsigned long long size; + unsigned int flags; + } cudaExternalMemoryHandleDesc; + * \endcode + * + * where ::cudaExternalMemoryHandleDesc::type specifies the type + * of handle being imported. ::cudaExternalMemoryHandleType is + * defined as: + * + * \code + typedef enum cudaExternalMemoryHandleType_enum { + cudaExternalMemoryHandleTypeOpaqueFd = 1, + cudaExternalMemoryHandleTypeOpaqueWin32 = 2, + cudaExternalMemoryHandleTypeOpaqueWin32Kmt = 3, + cudaExternalMemoryHandleTypeD3D12Heap = 4, + cudaExternalMemoryHandleTypeD3D12Resource = 5, + cudaExternalMemoryHandleTypeD3D11Resource = 6, + cudaExternalMemoryHandleTypeD3D11ResourceKmt = 7, + cudaExternalMemoryHandleTypeNvSciBuf = 8 + } cudaExternalMemoryHandleType; + * \endcode + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeOpaqueFd, then + * ::cudaExternalMemoryHandleDesc::handle::fd must be a valid + * file descriptor referencing a memory object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeOpaqueWin32, then exactly one + * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and + * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a memory object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::cudaExternalMemoryHandleDesc::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a memory object. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt, then + * ::cudaExternalMemoryHandleDesc::handle::win32::handle must + * be non-NULL and + * ::cudaExternalMemoryHandleDesc::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * memory object are destroyed. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeD3D12Heap, then exactly one + * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and + * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Heap object. This handle holds a reference to the underlying + * object. If ::cudaExternalMemoryHandleDesc::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Heap object. 
+ *
+ * If ::cudaExternalMemoryHandleDesc::type is
+ * ::cudaExternalMemoryHandleTypeD3D12Resource, then exactly one
+ * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and
+ * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be
+ * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle
+ * is not NULL, then it must represent a valid shared NT handle that
+ * is returned by ID3D12Device::CreateSharedHandle when referring to a
+ * ID3D12Resource object. This handle holds a reference to the
+ * underlying object. If
+ * ::cudaExternalMemoryHandleDesc::handle::win32::name
+ * is not NULL, then it must point to a NULL-terminated array of
+ * UTF-16 characters that refers to a ID3D12Resource object.
+ *
+ * If ::cudaExternalMemoryHandleDesc::type is
+ * ::cudaExternalMemoryHandleTypeD3D11Resource, then exactly one
+ * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and
+ * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be
+ * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle is
+ * not NULL, then it must represent a valid shared NT handle that is
+ * returned by IDXGIResource1::CreateSharedHandle when referring to a
+ * ID3D11Resource object. If
+ * ::cudaExternalMemoryHandleDesc::handle::win32::name
+ * is not NULL, then it must point to a NULL-terminated array of
+ * UTF-16 characters that refers to a ID3D11Resource object.
+ *
+ * If ::cudaExternalMemoryHandleDesc::type is
+ * ::cudaExternalMemoryHandleTypeD3D11ResourceKmt, then
+ * ::cudaExternalMemoryHandleDesc::handle::win32::handle must
+ * be non-NULL and ::cudaExternalMemoryHandleDesc::handle::win32::name
+ * must be NULL. The handle specified must be a valid shared KMT
+ * handle that is returned by IDXGIResource::GetSharedHandle when
+ * referring to a ID3D11Resource object.
+ *
+ * If ::cudaExternalMemoryHandleDesc::type is
+ * ::cudaExternalMemoryHandleTypeNvSciBuf, then
+ * ::cudaExternalMemoryHandleDesc::handle::nvSciBufObject must be NON-NULL
+ * and reference a valid NvSciBuf object.
+ * If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the
+ * application must use ::cudaWaitExternalSemaphoresAsync or ::cudaSignalExternalSemaphoresAsync
+ * as appropriate barriers to maintain coherence between CUDA and the other drivers.
+ * See ::cudaExternalSemaphoreWaitSkipNvSciBufMemSync and ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync
+ * for memory synchronization.
+ *
+ * The size of the memory object must be specified in
+ * ::cudaExternalMemoryHandleDesc::size.
+ *
+ * Specifying the flag ::cudaExternalMemoryDedicated in
+ * ::cudaExternalMemoryHandleDesc::flags indicates that the
+ * resource is a dedicated resource. The definition of what a
+ * dedicated resource is lies outside the scope of this extension. 
+ * This flag must be set if ::cudaExternalMemoryHandleDesc::type + * is one of the following: + * ::cudaExternalMemoryHandleTypeD3D12Resource + * ::cudaExternalMemoryHandleTypeD3D11Resource + * ::cudaExternalMemoryHandleTypeD3D11ResourceKmt + * + * \param extMem_out - Returned handle to an external memory object + * \param memHandleDesc - Memory import handle descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorOperatingSystem + * \notefnerr + * \note_init_rt + * \note_callback + * + * \note If the Vulkan memory imported into CUDA is mapped on the CPU then the + * application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges + * as well as appropriate Vulkan pipeline barriers to maintain coherence between + * CPU and GPU. For more information on these APIs, please refer to "Synchronization + * and Cache Control" chapter from Vulkan specification. + * + * + * \sa ::cudaDestroyExternalMemory, + * ::cudaExternalMemoryGetMappedBuffer, + * ::cudaExternalMemoryGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaImportExternalMemory(cudaExternalMemory_t *extMem_out, const struct cudaExternalMemoryHandleDesc *memHandleDesc); + +/** + * \brief Maps a buffer onto an imported memory object + * + * Maps a buffer onto an imported memory object and returns a device + * pointer in \p devPtr. + * + * The properties of the buffer being mapped must be described in + * \p bufferDesc. The ::cudaExternalMemoryBufferDesc structure is + * defined as follows: + * + * \code + typedef struct cudaExternalMemoryBufferDesc_st { + unsigned long long offset; + unsigned long long size; + unsigned int flags; + } cudaExternalMemoryBufferDesc; + * \endcode + * + * where ::cudaExternalMemoryBufferDesc::offset is the offset in + * the memory object where the buffer's base address is. + * ::cudaExternalMemoryBufferDesc::size is the size of the buffer. + * ::cudaExternalMemoryBufferDesc::flags must be zero. + * + * The offset and size have to be suitably aligned to match the + * requirements of the external API. Mapping two buffers whose ranges + * overlap may or may not result in the same virtual address being + * returned for the overlapped portion. In such cases, the application + * must ensure that all accesses to that region from the GPU are + * volatile. Otherwise writes made via one address are not guaranteed + * to be visible via the other address, even if they're issued by the + * same thread. It is recommended that applications map the combined + * range instead of mapping separate buffers and then apply the + * appropriate offsets to the returned pointer to derive the + * individual buffers. + * + * The returned pointer \p devPtr must be freed using ::cudaFree. 
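+ *
+ * As a hedged sketch, assuming \p extMem was obtained from
+ * ::cudaImportExternalMemory and \p bufSize bytes are available at offset 0:
+ * \code
+ void *devPtr = NULL;
+ cudaExternalMemoryBufferDesc bufferDesc = {};
+ bufferDesc.offset = 0;
+ bufferDesc.size   = bufSize;
+ bufferDesc.flags  = 0;
+ cudaExternalMemoryGetMappedBuffer(&devPtr, extMem, &bufferDesc);
+ // ... use devPtr from kernels ...
+ cudaFree(devPtr);                  // the mapped pointer must be freed with cudaFree
+ * \endcode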
+ * + * \param devPtr - Returned device pointer to buffer + * \param extMem - Handle to external memory object + * \param bufferDesc - Buffer descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalMemory, + * ::cudaDestroyExternalMemory, + * ::cudaExternalMemoryGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaExternalMemoryGetMappedBuffer(void **devPtr, cudaExternalMemory_t extMem, const struct cudaExternalMemoryBufferDesc *bufferDesc); + +/** + * \brief Maps a CUDA mipmapped array onto an external memory object + * + * Maps a CUDA mipmapped array onto an external object and returns a + * handle to it in \p mipmap. + * + * The properties of the CUDA mipmapped array being mapped must be + * described in \p mipmapDesc. The structure + * ::cudaExternalMemoryMipmappedArrayDesc is defined as follows: + * + * \code + typedef struct cudaExternalMemoryMipmappedArrayDesc_st { + unsigned long long offset; + cudaChannelFormatDesc formatDesc; + cudaExtent extent; + unsigned int flags; + unsigned int numLevels; + } cudaExternalMemoryMipmappedArrayDesc; + * \endcode + * + * where ::cudaExternalMemoryMipmappedArrayDesc::offset is the + * offset in the memory object where the base level of the mipmap + * chain is. + * ::cudaExternalMemoryMipmappedArrayDesc::formatDesc describes the + * format of the data. + * ::cudaExternalMemoryMipmappedArrayDesc::extent specifies the + * dimensions of the base level of the mipmap chain. + * ::cudaExternalMemoryMipmappedArrayDesc::flags are flags associated + * with CUDA mipmapped arrays. For further details, please refer to + * the documentation for ::cudaMalloc3DArray. Note that if the mipmapped + * array is bound as a color target in the graphics API, then the flag + * ::cudaArrayColorAttachment must be specified in + * ::cudaExternalMemoryMipmappedArrayDesc::flags. + * ::cudaExternalMemoryMipmappedArrayDesc::numLevels specifies + * the total number of levels in the mipmap chain. + * + * The returned CUDA mipmapped array must be freed using ::cudaFreeMipmappedArray. + * + * \param mipmap - Returned CUDA mipmapped array + * \param extMem - Handle to external memory object + * \param mipmapDesc - CUDA array descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalMemory, + * ::cudaDestroyExternalMemory, + * ::cudaExternalMemoryGetMappedBuffer + * + * \note If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeNvSciBuf, then + * ::cudaExternalMemoryMipmappedArrayDesc::numLevels must not be greater than 1. + */ +extern __host__ cudaError_t CUDARTAPI cudaExternalMemoryGetMappedMipmappedArray(cudaMipmappedArray_t *mipmap, cudaExternalMemory_t extMem, const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc); + +/** + * \brief Destroys an external memory object. + * + * Destroys the specified external memory object. Any existing buffers + * and CUDA mipmapped arrays mapped onto this object must no longer be + * used and must be explicitly freed using ::cudaFree and + * ::cudaFreeMipmappedArray respectively. 
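+ *
+ * One possible teardown order, as an illustrative sketch (\p devPtr, \p mipmap
+ * and \p extMem are assumed to come from the mapping and import calls above):
+ * \code
+ cudaFree(devPtr);                   // release any mapped buffers
+ cudaFreeMipmappedArray(mipmap);     // release any mapped mipmapped arrays
+ cudaDestroyExternalMemory(extMem);  // destroy the imported memory object
+ * \endcode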
+ * + * \param extMem - External memory object to be destroyed + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa ::cudaImportExternalMemory, + * ::cudaExternalMemoryGetMappedBuffer, + * ::cudaExternalMemoryGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroyExternalMemory(cudaExternalMemory_t extMem); + +/** + * \brief Imports an external semaphore + * + * Imports an externally allocated synchronization object and returns + * a handle to that in \p extSem_out. + * + * The properties of the handle being imported must be described in + * \p semHandleDesc. The ::cudaExternalSemaphoreHandleDesc is defined + * as follows: + * + * \code + typedef struct cudaExternalSemaphoreHandleDesc_st { + cudaExternalSemaphoreHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void* NvSciSyncObj; + } handle; + unsigned int flags; + } cudaExternalSemaphoreHandleDesc; + * \endcode + * + * where ::cudaExternalSemaphoreHandleDesc::type specifies the type of + * handle being imported. ::cudaExternalSemaphoreHandleType is defined + * as: + * + * \code + typedef enum cudaExternalSemaphoreHandleType_enum { + cudaExternalSemaphoreHandleTypeOpaqueFd = 1, + cudaExternalSemaphoreHandleTypeOpaqueWin32 = 2, + cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = 3, + cudaExternalSemaphoreHandleTypeD3D12Fence = 4, + cudaExternalSemaphoreHandleTypeD3D11Fence = 5, + cudaExternalSemaphoreHandleTypeNvSciSync = 6, + cudaExternalSemaphoreHandleTypeKeyedMutex = 7, + cudaExternalSemaphoreHandleTypeKeyedMutexKmt = 8, + cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = 9, + cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = 10 + } cudaExternalSemaphoreHandleType; + * \endcode + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeOpaqueFd, then + * ::cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file + * descriptor referencing a synchronization object. Ownership of the + * file descriptor is transferred to the CUDA driver when the handle + * is imported successfully. Performing any operations on the file + * descriptor after it is imported results in undefined behavior. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32, then exactly one of + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::cudaExternalSemaphoreHandleDesc::handle::win32::name is + * not NULL, then it must name a valid synchronization object. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle must be + * non-NULL and ::cudaExternalSemaphoreHandleDesc::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * synchronization object are destroyed. 
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeD3D12Fence, then exactly one of
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be
+ * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle
+ * is not NULL, then it must represent a valid shared NT handle that
+ * is returned by ID3D12Device::CreateSharedHandle when referring to a
+ * ID3D12Fence object. This handle holds a reference to the underlying
+ * object. If ::cudaExternalSemaphoreHandleDesc::handle::win32::name
+ * is not NULL, then it must name a valid synchronization object that
+ * refers to a valid ID3D12Fence object.
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeD3D11Fence, then exactly one of
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be
+ * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle
+ * is not NULL, then it must represent a valid shared NT handle that
+ * is returned by ID3D11Fence::CreateSharedHandle. If
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::name
+ * is not NULL, then it must name a valid synchronization object that
+ * refers to a valid ID3D11Fence object.
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeNvSciSync, then
+ * ::cudaExternalSemaphoreHandleDesc::handle::nvSciSyncObj
+ * represents a valid NvSciSyncObj.
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeKeyedMutex, then exactly one of
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be
+ * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle
+ * is not NULL, then it must represent a valid shared NT handle that
+ * is returned by IDXGIResource1::CreateSharedHandle when referring to
+ * a IDXGIKeyedMutex object.
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle must be
+ * non-NULL and ::cudaExternalSemaphoreHandleDesc::handle::win32::name
+ * must be NULL. The handle specified must represent a valid KMT
+ * handle that is returned by IDXGIResource::GetSharedHandle when
+ * referring to a IDXGIKeyedMutex object.
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, then
+ * ::cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file
+ * descriptor referencing a synchronization object. Ownership of the
+ * file descriptor is transferred to the CUDA driver when the handle
+ * is imported successfully. Performing any operations on the file
+ * descriptor after it is imported results in undefined behavior.
+ *
+ * If ::cudaExternalSemaphoreHandleDesc::type is
+ * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then exactly one of
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and
+ * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be
+ * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle
+ * is not NULL, then it must represent a valid shared NT handle that
+ * references a synchronization object. Ownership of this handle is
+ * not transferred to CUDA after the import operation, so the
+ * application must release the handle using the appropriate system
+ * call. 
If ::cudaExternalSemaphoreHandleDesc::handle::win32::name is + * not NULL, then it must name a valid synchronization object. + * + * \param extSem_out - Returned handle to an external semaphore + * \param semHandleDesc - Semaphore import handle descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorOperatingSystem + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDestroyExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaImportExternalSemaphore(cudaExternalSemaphore_t *extSem_out, const struct cudaExternalSemaphoreHandleDesc *semHandleDesc); + +/** + * \brief Signals a set of external semaphore objects + * + * Enqueues a signal operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of signaling a semaphore depends on the type of + * the object. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeOpaqueFd, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + * then signaling the semaphore will set it to the signaled state. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeD3D12Fence, + * ::cudaExternalSemaphoreHandleTypeD3D11Fence, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + * then the semaphore will be set to the value specified in + * ::cudaExternalSemaphoreSignalParams::params::fence::value. + * + * If the semaphore object is of the type ::cudaExternalSemaphoreHandleTypeNvSciSync + * this API sets ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence to a + * value that can be used by subsequent waiters of the same NvSciSync object to + * order operations with those currently submitted in \p stream. Such an update + * will overwrite previous contents of + * ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence. By default, + * signaling such an external semaphore object causes appropriate memory synchronization + * operations to be performed over all the external memory objects that are imported as + * ::cudaExternalMemoryHandleTypeNvSciBuf. This ensures that any subsequent accesses + * made by other importers of the same set of NvSciBuf memory object(s) are coherent. + * These operations can be skipped by specifying the flag + * ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. + * Also, for semaphore object of the type ::cudaExternalSemaphoreHandleTypeNvSciSync, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrSignal, this API will return + * cudaErrorNotSupported. + * + * ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence associated with + * semaphore object of the type ::cudaExternalSemaphoreHandleTypeNvSciSync can be + * deterministic. For this the NvSciSyncAttrList used to create the semaphore object + * must have value of NvSciSyncAttrKey_RequireDeterministicFences key set to true. 
+ * Deterministic fences allow users to enqueue a wait over the semaphore object even + * before corresponding signal is enqueued. For such a semaphore object, CUDA guarantees + * that each signal operation will increment the fence value by '1'. Users are expected + * to track count of signals enqueued on the semaphore object and insert waits accordingly. + * When such a semaphore object is signaled from multiple streams, due to concurrent + * stream execution, it is possible that the order in which the semaphore gets signaled + * is indeterministic. This could lead to waiters of the semaphore getting unblocked + * incorrectly. Users are expected to handle such situations, either by not using the + * same semaphore object with deterministic fence support enabled in different streams + * or by adding explicit dependency amongst such streams so that the semaphore is + * signaled in order. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeKeyedMutex, + * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt, + * then the keyed mutex will be released with the key specified in + * ::cudaExternalSemaphoreSignalParams::params::keyedmutex::key. + * + * \param extSemArray - Set of external semaphores to be signaled + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to signal + * \param stream - Stream to enqueue the signal operations in + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalSemaphore, + * ::cudaDestroyExternalSemaphore, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + +/** + * \brief Waits on a set of external semaphore objects + * + * Enqueues a wait operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of waiting on a semaphore depends on the type + * of the object. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeOpaqueFd, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + * then waiting on the semaphore will wait until the semaphore reaches + * the signaled state. The semaphore will then be reset to the + * unsignaled state. Therefore for every signal operation, there can + * only be one wait operation. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeD3D12Fence, + * ::cudaExternalSemaphoreHandleTypeD3D11Fence, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + * then waiting on the semaphore will wait until the value of the + * semaphore is greater than or equal to + * ::cudaExternalSemaphoreWaitParams::params::fence::value. + * + * If the semaphore object is of the type ::cudaExternalSemaphoreHandleTypeNvSciSync + * then, waiting on the semaphore will wait until the + * ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence is signaled by the + * signaler of the NvSciSyncObj that was associated with this semaphore object. 
+ * By default, waiting on such an external semaphore object causes appropriate + * memory synchronization operations to be performed over all external memory objects + * that are imported as ::cudaExternalMemoryHandleTypeNvSciBuf. This ensures that + * any subsequent accesses made by other importers of the same set of NvSciBuf memory + * object(s) are coherent. These operations can be skipped by specifying the flag + * ::cudaExternalSemaphoreWaitSkipNvSciBufMemSync, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. + * Also, for semaphore object of the type ::cudaExternalSemaphoreHandleTypeNvSciSync, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrWait, this API will return + * cudaErrorNotSupported. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeKeyedMutex, + * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt, + * then the keyed mutex will be acquired when it is released with the key specified + * in ::cudaExternalSemaphoreSignalParams::params::keyedmutex::key or + * until the timeout specified by + * ::cudaExternalSemaphoreSignalParams::params::keyedmutex::timeoutMs + * has lapsed. The timeout interval can either be a finite value + * specified in milliseconds or an infinite value. In case an infinite + * value is specified the timeout never elapses. The windows INFINITE + * macro must be used to specify infinite timeout + * + * \param extSemArray - External semaphores to be waited on + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to wait on + * \param stream - Stream to enqueue the wait operations in + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * ::cudaErrorTimeout + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalSemaphore, + * ::cudaDestroyExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + +/** + * \brief Destroys an external semaphore + * + * Destroys an external semaphore object and releases any references + * to the underlying resource. Any outstanding signals or waits must + * have completed before the semaphore is destroyed. + * + * \param extSem - External semaphore to be destroyed + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroyExternalSemaphore(cudaExternalSemaphore_t extSem); + +/** @} */ /* END CUDART_EXTRES_INTEROP */ + +/** + * \defgroup CUDART_EXECUTION Execution Control + * + * ___MANBRIEF___ execution control functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the execution control functions of the CUDA runtime + * application programming interface. + * + * Some functions have overloaded C++ API template versions documented separately in the + * \ref CUDART_HIGHLEVEL "C++ API Routines" module. 
+ *
+ * @{
+ */

+/**
+ * \brief Launches a device function
+ *
+ * The function invokes kernel \p func on a \p gridDim (\p gridDim.x × \p gridDim.y
+ * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x ×
+ * \p blockDim.y × \p blockDim.z) threads.
+ *
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
+ * Each pointer, from args[0] to args[N - 1], points to the region
+ * of memory from which the actual parameter will be copied.
+ *
+ * For templated functions, pass the function symbol as follows:
+ * func_name
+ *
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
+ * each thread block.
+ *
+ * \p stream specifies a stream the invocation is associated with.
+ *
+ * \param func - Device function symbol
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
+ * \param args - Arguments
+ * \param sharedMem - Shared memory
+ * \param stream - Stream identifier
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidConfiguration,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorLaunchOutOfResources,
+ * ::cudaErrorSharedObjectInitFailed,
+ * ::cudaErrorInvalidPtx,
+ * ::cudaErrorUnsupportedPtxVersion,
+ * ::cudaErrorNoKernelImageForDevice,
+ * ::cudaErrorJitCompilerNotFound,
+ * ::cudaErrorJitCompilationDisabled
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
+ * ::cuLaunchKernel
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream);
+
+/**
+ * \brief Launches a CUDA function with launch-time configuration
+ *
+ * Note that the functionally equivalent variadic template ::cudaLaunchKernelEx
+ * is available for C++11 and newer.
+ *
+ * Invokes the kernel \p func on a \p config->gridDim (\p config->gridDim.x
+ * × \p config->gridDim.y × \p config->gridDim.z) grid of blocks.
+ * Each block contains \p config->blockDim (\p config->blockDim.x ×
+ * \p config->blockDim.y × \p config->blockDim.z) threads.
+ *
+ * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that
+ * will be available to each thread block.
+ *
+ * \p config->stream specifies a stream the invocation is associated with.
+ *
+ * Configuration beyond grid and block dimensions, dynamic shared memory size,
+ * and stream can be provided with the following two fields of \p config:
+ *
+ * \p config->attrs is an array of \p config->numAttrs contiguous
+ * ::cudaLaunchAttribute elements. The value of this pointer is not considered
+ * if \p config->numAttrs is zero. However, in that case, it is recommended to
+ * set the pointer to NULL.
+ * \p config->numAttrs is the number of attributes populating the first
+ * \p config->numAttrs positions of the \p config->attrs array.
+ *
+ * If the kernel has N parameters, \p args should point to an array of N
+ * pointers. Each pointer, from args[0] to args[N - 1], points
+ * to the region of memory from which the actual parameter will be copied.
+ *
+ * N.B. This function is so named to avoid unintentionally invoking the
+ * templated version, \p cudaLaunchKernelEx, for kernels taking a single
+ * void** or void* parameter. 
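+ *
+ * As an informal sketch, a launch of a hypothetical kernel taking a single
+ * int parameter might look like this (\p myKernel and \p value are
+ * placeholders):
+ * \code
+ __global__ void myKernel(int value);
+
+ int value = 42;
+ void *args[] = { &value };
+
+ cudaLaunchConfig_t config = {};
+ config.gridDim  = dim3(16, 1, 1);
+ config.blockDim = dim3(256, 1, 1);
+ config.dynamicSmemBytes = 0;
+ config.stream   = 0;       // default stream
+ config.attrs    = NULL;    // no additional launch attributes
+ config.numAttrs = 0;
+
+ cudaLaunchKernelExC(&config, (const void *)myKernel, args);
+ * \endcode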
+ *
+ * \param config - Launch configuration
+ * \param func - Kernel to launch
+ * \param args - Array of pointers to kernel parameters
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidConfiguration,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorLaunchOutOfResources,
+ * ::cudaErrorSharedObjectInitFailed,
+ * ::cudaErrorInvalidPtx,
+ * ::cudaErrorUnsupportedPtxVersion,
+ * ::cudaErrorNoKernelImageForDevice,
+ * ::cudaErrorJitCompilerNotFound,
+ * ::cudaErrorJitCompilationDisabled
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * \ref ::cudaLaunchKernelEx(const cudaLaunchConfig_t *config, void (*kernel)(ExpTypes...), ActTypes &&... args) "cudaLaunchKernelEx (C++ API)",
+ * ::cuLaunchKernelEx
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args);
+
+/**
+ * \brief Launches a device function where thread blocks can cooperate and synchronize as they execute
+ *
+ * The function invokes kernel \p func on a \p gridDim (\p gridDim.x × \p gridDim.y
+ * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x ×
+ * \p blockDim.y × \p blockDim.z) threads.
+ *
+ * The device on which this kernel is invoked must have a non-zero value for
+ * the device attribute ::cudaDevAttrCooperativeLaunch.
+ *
+ * The total number of blocks launched cannot exceed the maximum number of blocks per
+ * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
+ * as specified by the device attribute ::cudaDevAttrMultiProcessorCount.
+ *
+ * The kernel cannot make use of CUDA dynamic parallelism.
+ *
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
+ * Each pointer, from args[0] to args[N - 1], points to the region
+ * of memory from which the actual parameter will be copied.
+ *
+ * For templated functions, pass the function symbol as follows:
+ * func_name
+ *
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
+ * each thread block.
+ *
+ * \p stream specifies a stream the invocation is associated with.
+ *
+ * \param func - Device function symbol
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
+ * \param args - Arguments
+ * \param sharedMem - Shared memory
+ * \param stream - Stream identifier
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidConfiguration,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorLaunchOutOfResources,
+ * ::cudaErrorCooperativeLaunchTooLarge,
+ * ::cudaErrorSharedObjectInitFailed
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * \ref ::cudaLaunchCooperativeKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C++ API)",
+ * ::cudaLaunchCooperativeKernelMultiDevice,
+ * ::cuLaunchCooperativeKernel
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream);
+
+/**
+ * \brief Launches device functions on multiple devices where thread blocks can cooperate and synchronize as they execute
+ *
+ * \deprecated This function is deprecated as of CUDA 11.3. 
+ *
+ * Invokes kernels as specified in the \p launchParamsList array, where each element
+ * of the array specifies all the parameters required to perform a single kernel launch.
+ * These kernels can cooperate and synchronize as they execute. The size of the array is
+ * specified by \p numDevices.
+ *
+ * No two kernels can be launched on the same device. All the devices targeted by this
+ * multi-device launch must be identical. All devices must have a non-zero value for the
+ * device attribute ::cudaDevAttrCooperativeMultiDeviceLaunch.
+ *
+ * The same kernel must be launched on all devices. Note that any __device__ or __constant__
+ * variables are independently instantiated on every device. It is the application's
+ * responsibility to ensure these variables are initialized and used appropriately.
+ *
+ * The size of the grids as specified in blocks, the size of the blocks themselves and the
+ * amount of shared memory used by each thread block must also match across all launched kernels.
+ *
+ * The streams used to launch these kernels must have been created via either ::cudaStreamCreate
+ * or ::cudaStreamCreateWithPriority. The NULL stream or
+ * ::cudaStreamLegacy or ::cudaStreamPerThread cannot be used.
+ *
+ * The total number of blocks launched per kernel cannot exceed the maximum number of blocks
+ * per multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
+ * as specified by the device attribute ::cudaDevAttrMultiProcessorCount. Since the
+ * total number of blocks launched per device has to match across all devices, the maximum
+ * number of blocks that can be launched per device will be limited by the device with the
+ * least number of multiprocessors.
+ *
+ * The kernel cannot make use of CUDA dynamic parallelism.
+ *
+ * The ::cudaLaunchParams structure is defined as:
+ * \code
+   struct cudaLaunchParams
+   {
+       void *func;
+       dim3 gridDim;
+       dim3 blockDim;
+       void **args;
+       size_t sharedMem;
+       cudaStream_t stream;
+   };
+ * \endcode
+ * where:
+ * - ::cudaLaunchParams::func specifies the kernel to be launched. The same function must
+ *   be launched on all devices. For templated functions, pass the function symbol as follows:
+ *   func_name<template_arg_0,...,template_arg_N>
+ * - ::cudaLaunchParams::gridDim specifies the width, height and depth of the grid in blocks.
+ *   This must match across all kernels launched.
+ * - ::cudaLaunchParams::blockDim is the width, height and depth of each thread block. This
+ *   must match across all kernels launched.
+ * - ::cudaLaunchParams::args specifies the arguments to the kernel. If the kernel has
+ *   N parameters then ::cudaLaunchParams::args should point to an array of N pointers. Each
+ *   pointer, from ::cudaLaunchParams::args[0] to ::cudaLaunchParams::args[N - 1],
+ *   points to the region of memory from which the actual parameter will be copied.
+ * - ::cudaLaunchParams::sharedMem is the dynamic shared-memory size per thread block in bytes.
+ *   This must match across all kernels launched.
+ * - ::cudaLaunchParams::stream is the handle to the stream to perform the launch in. This cannot
+ *   be the NULL stream or ::cudaStreamLegacy or ::cudaStreamPerThread.
+ *
+ * By default, the kernel won't begin execution on any GPU until all prior work in all the specified
+ * streams has completed. This behavior can be overridden by specifying the flag
+ * ::cudaCooperativeLaunchMultiDeviceNoPreSync.
When this flag is specified, each kernel + * will only wait for prior work in the stream corresponding to that GPU to complete before it begins + * execution. + * + * Similarly, by default, any subsequent work pushed in any of the specified streams will not begin + * execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying + * the flag ::cudaCooperativeLaunchMultiDeviceNoPostSync. When this flag is specified, + * any subsequent work pushed in any of the specified streams will only wait for the kernel launched + * on the GPU corresponding to that stream to complete before it begins execution. + * + * \param launchParamsList - List of launch parameters, one per device + * \param numDevices - Size of the \p launchParamsList array + * \param flags - Flags to control launch behavior + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorCooperativeLaunchTooLarge, + * ::cudaErrorSharedObjectInitFailed + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaLaunchCooperativeKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C++ API)", + * ::cudaLaunchCooperativeKernel, + * ::cuLaunchCooperativeKernelMultiDevice + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernelMultiDevice(struct cudaLaunchParams *launchParamsList, unsigned int numDevices, unsigned int flags __dv(0)); + +/** + * \brief Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache configuration + * for the function specified via \p func. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute \p func. + * + * \p func is a device function symbol and must be declared as a + * \c __global__ function. If the specified function does not exist, + * then ::cudaErrorInvalidDeviceFunction is returned. For templated functions, + * pass the function symbol as follows: func_name + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory + * + * \param func - Device function symbol + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_string_api_deprecation2 + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)", + * ::cuFuncSetCacheConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaFuncSetCacheConfig(const void *func, enum cudaFuncCache cacheConfig); + +/** + * \brief Sets the shared memory configuration for a device function + * + * On devices with configurable shared memory banks, this function will + * force all subsequent launches of the specified device function to have + * the given shared memory bank size configuration. On any given launch of the + * function, the shared memory configuration of the device will be temporarily + * changed if needed to suit the function's preferred configuration. Changes in + * shared memory configuration between subsequent launches of functions, + * may introduce a device side synchronization point. + * + * Any per-function setting of shared memory bank size set via + * ::cudaFuncSetSharedMemConfig will override the device wide setting set by + * ::cudaDeviceSetSharedMemConfig. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * For templated functions, pass the function symbol as follows: + * func_name + * + * The supported bank configurations are: + * - ::cudaSharedMemBankSizeDefault: use the device's shared memory configuration + * when launching this function. + * - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be + * four bytes natively when launching this function. + * - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be eight + * bytes natively when launching this function. 
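+ *
+ * A minimal, illustrative sketch (the kernel \c transposeKernel below is
+ * hypothetical and not part of this header): a kernel whose shared-memory
+ * accesses are predominantly 8 bytes wide might request eight-byte banks.
+ * \code
+   __global__ void transposeKernel(double *out, const double *in);
+
+   void configureBanks(void)
+   {
+       // Ignored on devices with a fixed shared memory bank size.
+       cudaFuncSetSharedMemConfig((const void *)transposeKernel,
+                                  cudaSharedMemBankSizeEightByte);
+   }
+ * \endcode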
+ * + * \param func - Device function symbol + * \param config - Requested shared memory configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_string_api_deprecation2 + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetSharedMemConfig, + * ::cudaDeviceGetSharedMemConfig, + * ::cudaDeviceSetCacheConfig, + * ::cudaDeviceGetCacheConfig, + * ::cudaFuncSetCacheConfig, + * ::cuFuncSetSharedMemConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaFuncSetSharedMemConfig(const void *func, enum cudaSharedMemConfig config); + +/** + * \brief Find out attributes for a given function + * + * This function obtains the attributes of a function specified via \p func. + * \p func is a device function symbol and must be declared as a + * \c __global__ function. The fetched attributes are placed in \p attr. + * If the specified function does not exist, then + * ::cudaErrorInvalidDeviceFunction is returned. For templated functions, pass + * the function symbol as follows: func_name + * + * Note that some function attributes such as + * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock" + * may vary based on the device that is currently being used. + * + * \param attr - Return pointer to function's attributes + * \param func - Device function symbol + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_string_api_deprecation2 + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)", + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)", + * ::cuFuncGetAttribute + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func); + + +/** + * \brief Set attributes for a given function + * + * This function sets the attributes of a function specified via \p func. + * The parameter \p func must be a pointer to a function that executes + * on the device. The parameter specified by \p func must be declared as a \p __global__ + * function. The enumeration defined by \p attr is set to the value defined by \p value. + * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * If the specified attribute cannot be written, or if the value is incorrect, + * then ::cudaErrorInvalidValue is returned. + * + * Valid values for \p attr are: + * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute ::sharedSizeBytes + * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture. + * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. 
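+ *
+ * An illustrative sketch (the kernel \c reduceKernel and the 64 KB figure are
+ * hypothetical, not part of this header): opting a kernel into more dynamic
+ * shared memory than the default limit.
+ * \code
+   __global__ void reduceKernel(float *out, const float *in, int n);
+
+   cudaError_t enableLargeSharedMem(void)
+   {
+       // Requires device support for cudaDevAttrMaxSharedMemoryPerBlockOptin.
+       return cudaFuncSetAttribute((const void *)reduceKernel,
+                                   cudaFuncAttributeMaxDynamicSharedMemorySize,
+                                   64 * 1024);
+   }
+ * \endcode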
+ * + * \param func - Function to get attributes of + * \param attr - Attribute to set + * \param value - Value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncSetAttribute(const void *func, enum cudaFuncAttribute attr, int value); + +/** + * \brief Converts a double argument to be executed on a device + * + * \param d - Double to convert + * + * \deprecated This function is deprecated as of CUDA 7.5 + * + * Converts the double value of \p d to an internal float representation if + * the device does not support double arithmetic. If the device does natively + * support doubles, then this function does nothing. + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForHost + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaSetDoubleForDevice(double *d); + +/** + * \brief Converts a double argument after execution on a device + * + * \deprecated This function is deprecated as of CUDA 7.5 + * + * Converts the double value of \p d from a potentially internal float + * representation if the device does not support double arithmetic. If the + * device does natively support doubles, then this function does nothing. + * + * \param d - Double to convert + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaSetDoubleForHost(double *d); + +/** + * \brief Enqueues a host function call in a stream + * + * Enqueues a host function to run in a stream. The function will be called + * after currently enqueued work and will block work added after it. + * + * The host function must not make any CUDA API calls. Attempting to use a + * CUDA API may result in ::cudaErrorNotPermitted, but this is not required. + * The host function must not perform any synchronization that may depend on + * outstanding CUDA work not mandated to run earlier. Host functions without a + * mandated order (such as in independent streams) execute in undefined order + * and may be serialized. + * + * For the purposes of Unified Memory, execution makes a number of guarantees: + *
+ *
+ * - The stream is considered idle for the duration of the function's
+ *   execution. Thus, for example, the function may always use memory attached
+ *   to the stream it was enqueued in.
+ * - The start of execution of the function has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the function. It thus synchronizes streams which have been "joined"
+ *   prior to the function.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding host functions and stream callbacks
+ *   have executed. Thus, for example, a function might use global attached
+ *   memory even if work has been added to another stream, if the work has
+ *   been ordered behind the function call with an event.
+ * - Completion of the function does not cause a stream to become
+ *   active except as described above. The stream will remain idle
+ *   if no device work follows the function, and will remain idle across
+ *   consecutive host functions or stream callbacks without device work in
+ *   between. Thus, for example, stream synchronization can be done by
+ *   signaling from a host function at the end of the stream.
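+ *
+ * A minimal sketch of enqueueing a host callback (the callback \c notifyDone
+ * and its \c flag argument are hypothetical, not part of this header):
+ * \code
+   void CUDART_CB notifyDone(void *userData)
+   {
+       // Must not call into the CUDA API.
+       *(volatile int *)userData = 1;
+   }
+
+   void enqueueNotification(cudaStream_t stream, volatile int *flag)
+   {
+       // Runs after all work previously enqueued in 'stream' has completed
+       // and blocks work enqueued in 'stream' after it.
+       cudaLaunchHostFunc(stream, notifyDone, (void *)flag);
+   }
+ * \endcode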
+ *
+ * Note that, in contrast to ::cuStreamAddCallback, the function will not be
+ * called in the event of an error in the CUDA context.
+ *
+ * \param stream   - Stream to enqueue function call in
+ * \param fn       - The function to call once preceding stream operations are complete
+ * \param userData - User-specified data to be passed to the function
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidResourceHandle,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorNotSupported
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaStreamCreate,
+ * ::cudaStreamQuery,
+ * ::cudaStreamSynchronize,
+ * ::cudaStreamWaitEvent,
+ * ::cudaStreamDestroy,
+ * ::cudaMallocManaged,
+ * ::cudaStreamAttachMemAsync,
+ * ::cudaStreamAddCallback,
+ * ::cuLaunchHostFunc
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchHostFunc(cudaStream_t stream, cudaHostFn_t fn, void *userData);
+
+/** @} */ /* END CUDART_EXECUTION */
+
+/**
+ * \defgroup CUDART_OCCUPANCY Occupancy
+ *
+ * ___MANBRIEF___ occupancy calculation functions of the CUDA runtime API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the occupancy calculation functions of the CUDA runtime
+ * application programming interface.
+ *
+ * Besides the occupancy calculator functions
+ * (\ref ::cudaOccupancyMaxActiveBlocksPerMultiprocessor and \ref ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags),
+ * there are also C++ only occupancy-based launch configuration functions documented in
+ * the \ref CUDART_HIGHLEVEL "C++ API Routines" module.
+ *
+ * See
+ * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSize (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)",
+ * \ref ::cudaOccupancyAvailableDynamicSMemPerBlock(size_t*, T, int, int) "cudaOccupancyAvailableDynamicSMemPerBlock (C++ API)"
+ *
+ * @{
+ */
+
+/**
+ * \brief Returns occupancy for a device function
+ *
+ * Returns in \p *numBlocks the maximum number of active blocks per
+ * streaming multiprocessor for the device function.
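+ *
+ * An illustrative sketch (the kernel \c myKernel and the block size of 256
+ * are hypothetical, not part of this header) of estimating how many blocks
+ * the whole device can keep resident:
+ * \code
+   __global__ void myKernel(float *data);
+
+   void reportOccupancy(void)
+   {
+       int numBlocksPerSm = 0;
+       cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm,
+                                                     (const void *)myKernel,
+                                                     256,  // threads per block
+                                                     0);   // dynamic smem bytes
+
+       int device = 0, numSms = 0;
+       cudaGetDevice(&device);
+       cudaDeviceGetAttribute(&numSms, cudaDevAttrMultiProcessorCount, device);
+
+       int maxResidentBlocks = numBlocksPerSm * numSms;
+       (void)maxResidentBlocks;  // e.g. use as the grid size of a persistent kernel
+   }
+ * \endcode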
+ * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calculated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, + * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)", + * \ref ::cudaOccupancyAvailableDynamicSMemPerBlock(size_t*, T, int, int) "cudaOccupancyAvailableDynamicSMemPerBlock (C++ API)", + * ::cuOccupancyMaxActiveBlocksPerMultiprocessor + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSMemSize); + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM. + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the block + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, + * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)", + * ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyAvailableDynamicSMemPerBlock(size_t *dynamicSmemSize, const void *func, int numBlocks, int blockSize); + +/** + * \brief Returns occupancy for a device function with the specified flags + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * The \p flags parameter controls how special cases are handled. 
Valid flags include:
+ *
+ * - ::cudaOccupancyDefault: keeps the default behavior as
+ *   ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
+ *
+ * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior
+ *   on platforms where global caching affects occupancy. On such platforms, if caching
+ *   is enabled, but per-block SM resource usage would result in zero occupancy, the
+ *   occupancy calculator will calculate the occupancy as if caching is disabled.
+ *   Setting this flag makes the occupancy calculator return 0 in such cases.
+ *   More information about this feature can be found in the "Unified L1/Texture Cache"
+ *   section of the Maxwell tuning guide.
+ *
+ * \param numBlocks       - Returned occupancy
+ * \param func            - Kernel function for which occupancy is calculated
+ * \param blockSize       - Block size the kernel is intended to be launched with
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
+ * \param flags           - Requested behavior for the occupancy calculator
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDevice,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown,
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor,
+ * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)",
+ * \ref ::cudaOccupancyAvailableDynamicSMemPerBlock(size_t*, T, int, int) "cudaOccupancyAvailableDynamicSMemPerBlock (C++ API)",
+ * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSMemSize, unsigned int flags);
+
+/**
+ * \brief Given the kernel function (\p func) and launch configuration
+ * (\p config), return the maximum cluster size in \p *clusterSize.
+ *
+ * The cluster dimensions in \p config are ignored. If \p func has a required
+ * cluster size set (see ::cudaFuncGetAttributes), \p *clusterSize will reflect
+ * the required cluster size.
+ *
+ * By default this function will always return a value that's portable on
+ * future hardware. A higher value may be returned if the kernel function
+ * allows non-portable cluster sizes.
+ *
+ * This function will respect the compile-time launch bounds.
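+ *
+ * An illustrative sketch (the kernel \c clusterKernel and the launch shape
+ * below are hypothetical, not part of this header):
+ * \code
+   __global__ void clusterKernel(float *data);
+
+   void queryMaxClusterSize(void)
+   {
+       cudaLaunchConfig_t config;
+       config.gridDim          = dim3(1024);
+       config.blockDim         = dim3(128);
+       config.dynamicSmemBytes = 0;
+       config.stream           = NULL;
+       config.attrs            = NULL;  // cluster dimensions here are ignored
+       config.numAttrs         = 0;
+
+       int clusterSize = 0;
+       cudaOccupancyMaxPotentialClusterSize(&clusterSize,
+                                            (const void *)clusterKernel,
+                                            &config);
+   }
+ * \endcode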
+ * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaFuncGetAttributes + * \ref ::cudaOccupancyMaxPotentialClusterSize(int*, T, const cudaLaunchConfig_t*) "cudaOccupancyMaxPotentialClusterSize (C++ API)", + * ::cuOccupancyMaxPotentialClusterSize + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxPotentialClusterSize(int *clusterSize, const void *func, const cudaLaunchConfig_t *launchConfig); + + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. + * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes), the cluster size from config must either be + * unspecified or match the required size. + * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidClusterSize, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + * \ref ::cudaOccupancyMaxActiveClusters(int*, T, const cudaLaunchConfig_t*) "cudaOccupancyMaxActiveClusters (C++ API)", + * ::cuOccupancyMaxActiveClusters + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveClusters(int *numClusters, const void *func, const cudaLaunchConfig_t *launchConfig); +/** @} */ /* END CUDA_OCCUPANCY */ + +/** + * \defgroup CUDART_MEMORY Memory Management + * + * ___MANBRIEF___ memory management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the memory management functions of the CUDA runtime + * application programming interface. + * + * Some functions have overloaded C++ API template versions documented separately in the + * \ref CUDART_HIGHLEVEL "C++ API Routines" module. + * + * @{ + */ + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p size bytes of managed memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::cudaErrorNotSupported is returned. Support + * for managed memory can be queried using the device attribute + * ::cudaDevAttrManagedMemory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p size + * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. 
The pointer
+ * is valid on the CPU and on all GPUs in the system that support managed memory.
+ * All accesses to this pointer must obey the Unified Memory programming model.
+ *
+ * \p flags specifies the default stream association for this allocation.
+ * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The
+ * default value for \p flags is ::cudaMemAttachGlobal.
+ * If ::cudaMemAttachGlobal is specified, then this memory is accessible from
+ * any stream on any device. If ::cudaMemAttachHost is specified, then the
+ * allocation should not be accessed from devices that have a zero value for the
+ * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to
+ * ::cudaStreamAttachMemAsync will be required to enable access on such devices.
+ *
+ * If the association is later changed via ::cudaStreamAttachMemAsync to
+ * a single stream, the default association, as specified during ::cudaMallocManaged,
+ * is restored when that stream is destroyed. For __managed__ variables, the
+ * default association is always ::cudaMemAttachGlobal. Note that destroying a
+ * stream is an asynchronous operation, and as a result, the change to default
+ * association won't happen until all work in the stream has completed.
+ *
+ * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree.
+ *
+ * Device memory oversubscription is possible for GPUs that have a non-zero value for the
+ * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on
+ * such GPUs may be evicted from device memory to host memory at any time by the Unified
+ * Memory driver in order to make room for other allocations.
+ *
+ * In a multi-GPU system where all GPUs have a non-zero value for the device attribute
+ * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this
+ * API returns and instead may be populated on access. In such systems, managed memory can
+ * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to
+ * maintain data locality and prevent excessive page faults to the extent possible. The application
+ * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application
+ * can also explicitly migrate memory to a desired processor's memory via
+ * ::cudaMemPrefetchAsync.
+ *
+ * In a multi-GPU system where all of the GPUs have a zero value for the device attribute
+ * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support
+ * with each other, the physical storage for managed memory is created on the GPU which is active
+ * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced
+ * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate
+ * memory among such GPUs.
+ *
+ * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and
+ * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess
+ * is zero for at least one of those GPUs, the location chosen for physical storage of managed
+ * memory is system-dependent.
+ * - On Linux, the location chosen will be device memory as long as the current set of active
+ *   contexts are on devices that either have peer-to-peer support with each other or have a
+ *   non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
+ * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. + * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero + * value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all devices used in + * that process that support managed memory have to be peer-to-peer compatible + * with each other. The error ::cudaErrorInvalidDevice will be returned if a device + * that supports managed memory is used and it is not peer-to-peer compatible with + * any of the other managed memory supporting devices that were previously used in + * that process, even if ::cudaDeviceReset has been called on those devices. These + * environment variables are described in the CUDA programming guide under the + * "CUDA environment variables" section. + * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal) + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync, + * ::cuMemAllocManaged + */ +#if defined(__cplusplus) +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMallocManaged(void **devPtr, size_t size, unsigned int flags = cudaMemAttachGlobal); +#else +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMallocManaged(void **devPtr, size_t size, unsigned int flags); +#endif + +/** + * \brief Allocate memory on the device + * + * Allocates \p size bytes of linear memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. The allocated memory is + * suitably aligned for any kind of variable. The memory is not cleared. + * ::cudaMalloc() returns ::cudaErrorMemoryAllocation in case of failure. + * + * The device version of ::cudaFree cannot be used with a \p *devPtr + * allocated using the host API, and vice versa. 
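+ *
+ * A minimal, illustrative sketch (the element count \c n and the buffer name
+ * are hypothetical):
+ * \code
+   void allocateAndRelease(size_t n)
+   {
+       float *d_buf = NULL;
+
+       // Allocate n floats of linear device memory.
+       if (cudaMalloc((void **)&d_buf, n * sizeof(float)) != cudaSuccess)
+           return;
+
+       // ... use d_buf in kernels or cudaMemcpy calls ...
+
+       cudaFree(d_buf);
+   }
+ * \endcode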
+ * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuMemAlloc + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size); + +/** + * \brief Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy*(). Since the memory can be accessed directly by the device, + * it can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * memory with ::cudaMallocHost() may degrade system performance, since it + * reduces the amount of memory available to the system for paging. As a + * result, this function is best used sparingly to allocate staging areas for + * data exchange between host and device. + * + * \param ptr - Pointer to allocated host memory + * \param size - Requested allocation size in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaMallocArray, ::cudaMalloc3D, + * ::cudaMalloc3DArray, ::cudaHostAlloc, ::cudaFree, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t, unsigned int) "cudaMallocHost (C++ API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuMemAllocHost + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocHost(void **ptr, size_t size); + +/** + * \brief Allocates pitched memory on the device + * + * Allocates at least \p width (in bytes) * \p height bytes of linear memory + * on the device and returns in \p *devPtr a pointer to the allocated memory. + * The function may pad the allocation to ensure that corresponding pointers + * in any given row will continue to meet the alignment requirements for + * coalescing as the address is updated from row to row. The pitch returned in + * \p *pitch by ::cudaMallocPitch() is the width in bytes of the allocation. + * The intended usage of \p pitch is as a separate parameter of the allocation, + * used to compute addresses within the 2D array. Given the row and column of + * an array element of type \p T, the address is computed as: + * \code + T* pElement = (T*)((char*)BaseAddress + Row * pitch) + Column; + \endcode + * + * For allocations of 2D arrays, it is recommended that programmers consider + * performing pitch allocations using ::cudaMallocPitch(). Due to pitch + * alignment restrictions in the hardware, this is especially true if the + * application will be performing 2D memory copies between different regions + * of device memory (whether linear memory or CUDA arrays). 
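+ *
+ * A minimal, illustrative sketch (the image dimensions and names are
+ * hypothetical) that follows the addressing formula shown above:
+ * \code
+   void allocate2D(size_t widthInElements, size_t height)
+   {
+       float *d_img = NULL;
+       size_t pitch = 0;   // returned row pitch in bytes
+
+       cudaMallocPitch((void **)&d_img, &pitch,
+                       widthInElements * sizeof(float), height);
+
+       // Element (row, col) lives at:
+       //   (float *)((char *)d_img + row * pitch) + col
+
+       cudaFree(d_img);
+   }
+ * \endcode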
+ * + * \param devPtr - Pointer to allocated pitched device memory + * \param pitch - Pitch for allocation + * \param width - Requested pitched allocation width (in bytes) + * \param height - Requested pitched allocation height + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray, + * ::cudaHostAlloc, + * ::cuMemAllocPitch + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocPitch(void **devPtr, size_t *pitch, size_t width, size_t height); + +/** + * \brief Allocate an array on the device + * + * Allocates a CUDA array according to the ::cudaChannelFormatDesc structure + * \p desc and returns a handle to the new CUDA array in \p *array. + * + * The ::cudaChannelFormatDesc is defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + \endcode + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation + * - ::cudaArraySurfaceLoadStore: Allocates an array that can be read from or written to using a surface reference + * - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the array. + * - ::cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array + * can later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. + * The physical backing memory must be allocated via ::cuMemCreate. + * - ::cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can + * later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. + * The physical backing memory must be allocated via ::cuMemCreate. + * + * \p width and \p height must meet certain size requirements. See ::cudaMalloc3DArray() for more details. + * + * \param array - Pointer to allocated array in device memory + * \param desc - Requested channel format + * \param width - Requested array allocation width + * \param height - Requested array allocation height + * \param flags - Requested properties of allocated array + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray, + * ::cudaHostAlloc, + * ::cuArrayCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocArray(cudaArray_t *array, const struct cudaChannelFormatDesc *desc, size_t width, size_t height __dv(0), unsigned int flags __dv(0)); + +/** + * \brief Frees memory on the device + * + * Frees the memory space pointed to by \p devPtr, which must have been + * returned by a previous call to one of the following memory allocation APIs - + * ::cudaMalloc(), ::cudaMallocPitch(), ::cudaMallocManaged(), ::cudaMallocAsync(), + * ::cudaMallocFromPoolAsync(). 
+ * + * Note - This API will not perform any implicit synchronization when the pointer was + * allocated with ::cudaMallocAsync or ::cudaMallocFromPoolAsync. Callers must ensure + * that all accesses to the pointer have completed before invoking ::cudaFree. For + * best performance and memory reuse, users should use ::cudaFreeAsync to free memory + * allocated via the stream ordered memory allocator. + * + * If ::cudaFree(\p devPtr) has already been called before, + * an error is returned. If \p devPtr is 0, no operation is performed. + * ::cudaFree() returns ::cudaErrorValue in case of failure. + * + * The device version of ::cudaFree cannot be used with a \p *devPtr + * allocated using the host API, and vice versa. + * + * \param devPtr - Device pointer to memory to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaMallocManaged, ::cudaMallocArray, ::cudaFreeArray, ::cudaMallocAsync, ::cudaMallocFromPoolAsync + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaFreeAsync + * ::cudaHostAlloc, + * ::cuMemFree + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr); + +/** + * \brief Frees page-locked memory + * + * Frees the memory space pointed to by \p hostPtr, which must have been + * returned by a previous call to ::cudaMallocHost() or ::cudaHostAlloc(). + * + * \param ptr - Pointer to memory to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaHostAlloc, + * ::cuMemFreeHost + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeHost(void *ptr); + +/** + * \brief Frees an array on the device + * + * Frees the CUDA array \p array, which must have been returned by a + * previous call to ::cudaMallocArray(). If \p devPtr is 0, + * no operation is performed. + * + * \param array - Pointer to array to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuArrayDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeArray(cudaArray_t array); + +/** + * \brief Frees a mipmapped array on the device + * + * Frees the CUDA mipmapped array \p mipmappedArray, which must have been + * returned by a previous call to ::cudaMallocMipmappedArray(). If \p devPtr + * is 0, no operation is performed. + * + * \param mipmappedArray - Pointer to mipmapped array to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuMipmappedArrayDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeMipmappedArray(cudaMipmappedArray_t mipmappedArray); + + +/** + * \brief Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. 
The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it + * can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between host + * and device. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaHostAllocDefault: This flag's value is defined to be 0 and causes + * ::cudaHostAlloc() to emulate ::cudaMallocHost(). + * - ::cudaHostAllocPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space. + * The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). + * WC memory can be transferred across the PCI Express bus more quickly on some + * system configurations, but cannot be read efficiently by most CPUs. WC + * memory is a good option for buffers that will be written by the CPU and read + * by the device via mapped pinned memory or host->device transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * In order for the ::cudaHostAllocMapped flag to have any effect, the CUDA context + * must support the ::cudaDeviceMapHost flag, which can be checked via + * ::cudaGetDeviceFlags(). The ::cudaDeviceMapHost flag is implicitly set for + * contexts created via the runtime API. + * + * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices + * that do not support mapped pinned memory. The failure is deferred to + * ::cudaHostGetDevicePointer() because the memory may be mapped into other + * CUDA contexts via the ::cudaHostAllocPortable flag. + * + * Memory allocated by this function must be freed with ::cudaFreeHost(). + * + * \param pHost - Device pointer to allocated memory + * \param size - Requested allocation size in bytes + * \param flags - Requested properties of allocated memory + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, + * ::cudaGetDeviceFlags, + * ::cuMemHostAlloc + */ +extern __host__ cudaError_t CUDARTAPI cudaHostAlloc(void **pHost, size_t size, unsigned int flags); + +/** + * \brief Registers an existing host memory range for use by CUDA + * + * Page-locks the memory range specified by \p ptr and \p size and maps it + * for the device(s) as specified by \p flags. This memory range also is added + * to the same tracking mechanism as ::cudaHostAlloc() to automatically accelerate + * calls to functions such as ::cudaMemcpy(). 
Since the memory can be accessed + * directly by the device, it can be read or written with much higher bandwidth + * than pageable memory that has not been registered. Page-locking excessive + * amounts of memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to register staging areas for data exchange between + * host and device. + * + * On systems where ::pageableMemoryAccessUsesHostPageTables is true, ::cudaHostRegister + * will not page-lock the memory range specified by \p ptr but only populate + * unpopulated pages. + * + * ::cudaHostRegister is supported only on I/O coherent devices that have a non-zero + * value for the device attribute ::cudaDevAttrHostRegisterSupported. + * + * The \p flags parameter enables different options to be specified that + * affect the allocation, as follows. + * + * - ::cudaHostRegisterDefault: On a system with unified virtual addressing, + * the memory will be both mapped and portable. On a system with no unified + * virtual addressing, the memory will be neither mapped nor portable. + * + * - ::cudaHostRegisterPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * + * - ::cudaHostRegisterMapped: Maps the allocation into the CUDA address + * space. The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * + * - ::cudaHostRegisterIoMemory: The passed memory pointer is treated as + * pointing to some memory-mapped I/O space, e.g. belonging to a + * third-party PCIe device, and it will marked as non cache-coherent and + * contiguous. + * + * - ::cudaHostRegisterReadOnly: The passed memory pointer is treated as + * pointing to memory that is considered read-only by the device. On + * platforms without ::cudaDevAttrPageableMemoryAccessUsesHostPageTables, this + * flag is required in order to register memory mapped to the CPU as + * read-only. Support for the use of this flag can be queried from the device + * attribute cudaDeviceAttrReadOnlyHostRegisterSupported. Using this flag with + * a current context associated with a device that does not have this attribute + * set will cause ::cudaHostRegister to error with cudaErrorNotSupported. + * + * All of these flags are orthogonal to one another: a developer may page-lock + * memory that is portable or mapped with no restrictions. + * + * The CUDA context must have been created with the ::cudaMapHost flag in + * order for the ::cudaHostRegisterMapped flag to have any effect. + * + * The ::cudaHostRegisterMapped flag may be specified on CUDA contexts for + * devices that do not support mapped pinned memory. The failure is deferred + * to ::cudaHostGetDevicePointer() because the memory may be mapped into + * other CUDA contexts via the ::cudaHostRegisterPortable flag. + * + * For devices that have a non-zero value for the device attribute + * ::cudaDevAttrCanUseHostPointerForRegisteredMem, the memory + * can also be accessed from the device using the host pointer \p ptr. + * The device pointer returned by ::cudaHostGetDevicePointer() may or may not + * match the original host pointer \p ptr and depends on the devices visible to the + * application. If all devices visible to the application have a non-zero value for the + * device attribute, the device pointer returned by ::cudaHostGetDevicePointer() + * will match the original pointer \p ptr. 
If any device visible to the application
+ * has a zero value for the device attribute, the device pointer returned by
+ * ::cudaHostGetDevicePointer() will not match the original host pointer \p ptr,
+ * but it will be suitable for use on all devices provided Unified Virtual Addressing
+ * is enabled. In such systems, it is valid to access the memory using either pointer
+ * on devices that have a non-zero value for the device attribute. Note however that
+ * such devices should access the memory using only one of the two pointers and not both.
+ *
+ * The memory page-locked by this function must be unregistered with ::cudaHostUnregister().
+ *
+ * \param ptr   - Host pointer to memory to page-lock
+ * \param size  - Size in bytes of the address range to page-lock
+ * \param flags - Flags for allocation request
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorMemoryAllocation,
+ * ::cudaErrorHostMemoryAlreadyRegistered,
+ * ::cudaErrorNotSupported
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaHostUnregister, ::cudaHostGetFlags, ::cudaHostGetDevicePointer,
+ * ::cuMemHostRegister
+ */
+extern __host__ cudaError_t CUDARTAPI cudaHostRegister(void *ptr, size_t size, unsigned int flags);
+
+/**
+ * \brief Unregisters a memory range that was registered with cudaHostRegister
+ *
+ * Unmaps the memory range whose base address is specified by \p ptr, and makes
+ * it pageable again.
+ *
+ * The base address must be the same one specified to ::cudaHostRegister().
+ *
+ * \param ptr - Host pointer to memory to unregister
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorHostMemoryNotRegistered
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaHostRegister,
+ * ::cuMemHostUnregister
+ */
+extern __host__ cudaError_t CUDARTAPI cudaHostUnregister(void *ptr);
+
+/**
+ * \brief Passes back device pointer of mapped host memory allocated by
+ * cudaHostAlloc or registered by cudaHostRegister
+ *
+ * Passes back the device pointer corresponding to the mapped, pinned host
+ * buffer allocated by ::cudaHostAlloc() or registered by ::cudaHostRegister().
+ *
+ * ::cudaHostGetDevicePointer() will fail if the ::cudaDeviceMapHost flag was
+ * not specified before deferred context creation occurred, or if called on a
+ * device that does not support mapped, pinned memory.
+ *
+ * For devices that have a non-zero value for the device attribute
+ * ::cudaDevAttrCanUseHostPointerForRegisteredMem, the memory
+ * can also be accessed from the device using the host pointer \p pHost.
+ * The device pointer returned by ::cudaHostGetDevicePointer() may or may not
+ * match the original host pointer \p pHost and depends on the devices visible to the
+ * application. If all devices visible to the application have a non-zero value for the
+ * device attribute, the device pointer returned by ::cudaHostGetDevicePointer()
+ * will match the original pointer \p pHost. If any device visible to the application
+ * has a zero value for the device attribute, the device pointer returned by
+ * ::cudaHostGetDevicePointer() will not match the original host pointer \p pHost,
+ * but it will be suitable for use on all devices provided Unified Virtual Addressing
+ * is enabled. In such systems, it is valid to access the memory using either pointer
+ * on devices that have a non-zero value for the device attribute. Note however that
+ * such devices should access the memory using only one of the two pointers and not both.
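+ *
+ * A minimal, illustrative sketch combining ::cudaHostRegister and this
+ * function (the buffer and its size are hypothetical):
+ * \code
+   void registerAndMap(void *hostBuf, size_t bytes)
+   {
+       // Page-lock an existing host allocation and map it for device access.
+       if (cudaHostRegister(hostBuf, bytes, cudaHostRegisterMapped) != cudaSuccess)
+           return;
+
+       void *devPtr = NULL;
+       cudaHostGetDevicePointer(&devPtr, hostBuf, 0);
+
+       // ... launch kernels that read or write through devPtr ...
+
+       cudaHostUnregister(hostBuf);
+   }
+ * \endcode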
+ * + * \p flags provides for future releases. For now, it must be set to 0. + * + * \param pDevice - Returned device pointer for mapped memory + * \param pHost - Requested host pointer mapping + * \param flags - Flags for extensions (must be 0 for now) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, ::cudaHostAlloc, + * ::cuMemHostGetDevicePointer + */ +extern __host__ cudaError_t CUDARTAPI cudaHostGetDevicePointer(void **pDevice, void *pHost, unsigned int flags); + +/** + * \brief Passes back flags used to allocate pinned host memory allocated by + * cudaHostAlloc + * + * ::cudaHostGetFlags() will fail if the input pointer does not + * reside in an address range allocated by ::cudaHostAlloc(). + * + * \param pFlags - Returned flags word + * \param pHost - Host pointer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaHostAlloc, + * ::cuMemHostGetFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaHostGetFlags(unsigned int *pFlags, void *pHost); + +/** + * \brief Allocates logical 1D, 2D, or 3D memory objects on the device + * + * Allocates at least \p width * \p height * \p depth bytes of linear memory + * on the device and returns a ::cudaPitchedPtr in which \p ptr is a pointer + * to the allocated memory. The function may pad the allocation to ensure + * hardware alignment requirements are met. The pitch returned in the \p pitch + * field of \p pitchedDevPtr is the width in bytes of the allocation. + * + * The returned ::cudaPitchedPtr contains additional fields \p xsize and + * \p ysize, the logical width and height of the allocation, which are + * equivalent to the \p width and \p height \p extent parameters provided by + * the programmer during allocation. + * + * For allocations of 2D and 3D objects, it is highly recommended that + * programmers perform allocations using ::cudaMalloc3D() or + * ::cudaMallocPitch(). Due to alignment restrictions in the hardware, this is + * especially true if the application will be performing memory copies + * involving 2D or 3D objects (whether linear memory or CUDA arrays). + * + * \param pitchedDevPtr - Pointer to allocated pitched device memory + * \param extent - Requested allocation size (\p width field in bytes) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMemcpy3D, ::cudaMemset3D, + * ::cudaMalloc3DArray, ::cudaMallocArray, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::make_cudaPitchedPtr, ::make_cudaExtent, + * ::cuMemAllocPitch + */ +extern __host__ cudaError_t CUDARTAPI cudaMalloc3D(struct cudaPitchedPtr* pitchedDevPtr, struct cudaExtent extent); + +/** + * \brief Allocate an array on the device + * + * Allocates a CUDA array according to the ::cudaChannelFormatDesc structure + * \p desc and returns a handle to the new CUDA array in \p *array. + * + * The ::cudaChannelFormatDesc is defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + \endcode + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. 
+ * + * ::cudaMalloc3DArray() can allocate the following: + * + * - A 1D array is allocated if the height and depth extents are both zero. + * - A 2D array is allocated if only the depth extent is zero. + * - A 3D array is allocated if all three extents are non-zero. + * - A 1D layered CUDA array is allocated if only the height extent is zero and + * the cudaArrayLayered flag is set. Each layer is a 1D array. The number of layers is + * determined by the depth extent. + * - A 2D layered CUDA array is allocated if all three extents are non-zero and + * the cudaArrayLayered flag is set. Each layer is a 2D array. The number of layers is + * determined by the depth extent. + * - A cubemap CUDA array is allocated if all three extents are non-zero and the + * cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. A cubemap is + * a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. + * The order of the six layers in memory is the same as that listed in ::cudaGraphicsCubeFace. + * - A cubemap layered CUDA array is allocated if all three extents are non-zero, and both, + * cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be + * a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists + * of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form + * the second cubemap, and so on. + * + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation + * - ::cudaArrayLayered: Allocates a layered CUDA array, with the depth extent indicating the number of layers + * - ::cudaArrayCubemap: Allocates a cubemap CUDA array. Width must be equal to height, and depth must be six. + * If the cudaArrayLayered flag is also set, depth must be a multiple of six. + * - ::cudaArraySurfaceLoadStore: Allocates a CUDA array that could be read from or written to using a surface + * reference. + * - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA + * array. Texture gather can only be performed on 2D CUDA arrays. + * - ::cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array + * can later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. This flag can only be used for + * creating 2D, 3D or 2D layered sparse CUDA arrays. The physical backing memory must be allocated via ::cuMemCreate. + * - ::cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can + * later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. The physical backing memory must be allocated + * via ::cuMemCreate. + * + * The width, height and depth extents must meet certain size requirements as listed in the following table. + * All values are specified in elements. + * + * Note that 2D CUDA arrays have different size requirements if the ::cudaArrayTextureGather flag is set. In that + * case, the valid range for (width, height, depth) is ((1,maxTexture2DGather[0]), (1,maxTexture2DGather[1]), 0). 
+ * + * \xmlonly + * + * + * + * + * + * + * + * CUDA array type + * Valid extents that must always be met {(width range in elements), + * (height range), (depth range)} + * Valid extents with cudaArraySurfaceLoadStore set {(width range in + * elements), (height range), (depth range)} + * + * + * + * + * 1D + * { (1,maxTexture1D), 0, 0 } + * { (1,maxSurface1D), 0, 0 } + * + * + * 2D + * { (1,maxTexture2D[0]), (1,maxTexture2D[1]), 0 } + * { (1,maxSurface2D[0]), (1,maxSurface2D[1]), 0 } + * + * + * 3D + * { (1,maxTexture3D[0]), (1,maxTexture3D[1]), (1,maxTexture3D[2]) } + * OR { (1,maxTexture3DAlt[0]), (1,maxTexture3DAlt[1]), + * (1,maxTexture3DAlt[2]) } + * { (1,maxSurface3D[0]), (1,maxSurface3D[1]), (1,maxSurface3D[2]) } + * + * + * 1D Layered + * { (1,maxTexture1DLayered[0]), 0, (1,maxTexture1DLayered[1]) } + * { (1,maxSurface1DLayered[0]), 0, (1,maxSurface1DLayered[1]) } + * + * + * 2D Layered + * { (1,maxTexture2DLayered[0]), (1,maxTexture2DLayered[1]), + * (1,maxTexture2DLayered[2]) } + * { (1,maxSurface2DLayered[0]), (1,maxSurface2DLayered[1]), + * (1,maxSurface2DLayered[2]) } + * + * + * Cubemap + * { (1,maxTextureCubemap), (1,maxTextureCubemap), 6 } + * { (1,maxSurfaceCubemap), (1,maxSurfaceCubemap), 6 } + * + * + * Cubemap Layered + * { (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[0]), + * (1,maxTextureCubemapLayered[1]) } + * { (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[0]), + * (1,maxSurfaceCubemapLayered[1]) } + * + * + * + *
+ * \endxmlonly + * + * \param array - Pointer to allocated array in device memory + * \param desc - Requested channel format + * \param extent - Requested allocation size (\p width field in elements) + * \param flags - Flags for extensions + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::make_cudaExtent, + * ::cuArray3DCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMalloc3DArray(cudaArray_t *array, const struct cudaChannelFormatDesc* desc, struct cudaExtent extent, unsigned int flags __dv(0)); + +/** + * \brief Allocate a mipmapped array on the device + * + * Allocates a CUDA mipmapped array according to the ::cudaChannelFormatDesc structure + * \p desc and returns a handle to the new CUDA mipmapped array in \p *mipmappedArray. + * \p numLevels specifies the number of mipmap levels to be allocated. This value is + * clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]. + * + * The ::cudaChannelFormatDesc is defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + \endcode + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. + * + * ::cudaMallocMipmappedArray() can allocate the following: + * + * - A 1D mipmapped array is allocated if the height and depth extents are both zero. + * - A 2D mipmapped array is allocated if only the depth extent is zero. + * - A 3D mipmapped array is allocated if all three extents are non-zero. + * - A 1D layered CUDA mipmapped array is allocated if only the height extent is zero and + * the cudaArrayLayered flag is set. Each layer is a 1D mipmapped array. The number of layers is + * determined by the depth extent. + * - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and + * the cudaArrayLayered flag is set. Each layer is a 2D mipmapped array. The number of layers is + * determined by the depth extent. + * - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the + * cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. + * The order of the six layers in memory is the same as that listed in ::cudaGraphicsCubeFace. + * - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, and both, + * cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be + * a multiple of six. A cubemap layered CUDA mipmapped array is a special type of 2D layered CUDA mipmapped + * array that consists of a collection of cubemap mipmapped arrays. The first six layers represent the + * first cubemap mipmapped array, the next six layers form the second cubemap mipmapped array, and so on. + * + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default mipmapped array allocation + * - ::cudaArrayLayered: Allocates a layered CUDA mipmapped array, with the depth extent indicating the number of layers + * - ::cudaArrayCubemap: Allocates a cubemap CUDA mipmapped array. Width must be equal to height, and depth must be six. 
+ * If the cudaArrayLayered flag is also set, depth must be a multiple of six. + * - ::cudaArraySurfaceLoadStore: This flag indicates that individual mipmap levels of the CUDA mipmapped array + * will be read from or written to using a surface reference. + * - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA + * array. Texture gather can only be performed on 2D CUDA mipmapped arrays, and the gather operations are + * performed only on the most detailed mipmap level. + * - ::cudaArraySparse: Allocates a CUDA mipmapped array without physical backing memory. The subregions within this sparse array + * can later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. This flag can only be used for creating + * 2D, 3D or 2D layered sparse CUDA mipmapped arrays. The physical backing memory must be allocated via ::cuMemCreate. + * - ::cudaArrayDeferredMapping: Allocates a CUDA mipmapped array without physical backing memory. The entire array can + * later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. The physical backing memory must be allocated + * via ::cuMemCreate. + * + * The width, height and depth extents must meet certain size requirements as listed in the following table. + * All values are specified in elements. + * + * \xmlonly + * + * + * + * + * + * + * + * CUDA array type + * Valid extents that must always be met {(width range in elements), + * (height range), (depth range)} + * Valid extents with cudaArraySurfaceLoadStore set {(width range in + * elements), (height range), (depth range)} + * + * + * + * + * 1D + * { (1,maxTexture1DMipmap), 0, 0 } + * { (1,maxSurface1D), 0, 0 } + * + * + * 2D + * { (1,maxTexture2DMipmap[0]), (1,maxTexture2DMipmap[1]), 0 } + * { (1,maxSurface2D[0]), (1,maxSurface2D[1]), 0 } + * + * + * 3D + * { (1,maxTexture3D[0]), (1,maxTexture3D[1]), (1,maxTexture3D[2]) } + * OR { (1,maxTexture3DAlt[0]), (1,maxTexture3DAlt[1]), + * (1,maxTexture3DAlt[2]) } + * { (1,maxSurface3D[0]), (1,maxSurface3D[1]), (1,maxSurface3D[2]) } + * + * + * 1D Layered + * { (1,maxTexture1DLayered[0]), 0, (1,maxTexture1DLayered[1]) } + * { (1,maxSurface1DLayered[0]), 0, (1,maxSurface1DLayered[1]) } + * + * + * 2D Layered + * { (1,maxTexture2DLayered[0]), (1,maxTexture2DLayered[1]), + * (1,maxTexture2DLayered[2]) } + * { (1,maxSurface2DLayered[0]), (1,maxSurface2DLayered[1]), + * (1,maxSurface2DLayered[2]) } + * + * + * Cubemap + * { (1,maxTextureCubemap), (1,maxTextureCubemap), 6 } + * { (1,maxSurfaceCubemap), (1,maxSurfaceCubemap), 6 } + * + * + * Cubemap Layered + * { (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[0]), + * (1,maxTextureCubemapLayered[1]) } + * { (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[0]), + * (1,maxSurfaceCubemapLayered[1]) } + * + * + * + *
+ * \endxmlonly + * + * \param mipmappedArray - Pointer to allocated mipmapped array in device memory + * \param desc - Requested channel format + * \param extent - Requested allocation size (\p width field in elements) + * \param numLevels - Number of mipmap levels to allocate + * \param flags - Flags for extensions + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::make_cudaExtent, + * ::cuMipmappedArrayCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocMipmappedArray(cudaMipmappedArray_t *mipmappedArray, const struct cudaChannelFormatDesc* desc, struct cudaExtent extent, unsigned int numLevels, unsigned int flags __dv(0)); + +/** + * \brief Gets a mipmap level of a CUDA mipmapped array + * + * Returns in \p *levelArray a CUDA array that represents a single mipmap level + * of the CUDA mipmapped array \p mipmappedArray. + * + * If \p level is greater than the maximum number of levels in this mipmapped array, + * ::cudaErrorInvalidValue is returned. + * + * If \p mipmappedArray is NULL, + * ::cudaErrorInvalidResourceHandle is returned. + * + * \param levelArray - Returned mipmap level CUDA array + * \param mipmappedArray - CUDA mipmapped array + * \param level - Mipmap level + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::make_cudaExtent, + * ::cuMipmappedArrayGetLevel + */ +extern __host__ cudaError_t CUDARTAPI cudaGetMipmappedArrayLevel(cudaArray_t *levelArray, cudaMipmappedArray_const_t mipmappedArray, unsigned int level); + +/** + * \brief Copies data between 3D objects + * +\code +struct cudaExtent { + size_t width; + size_t height; + size_t depth; +}; +struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d); + +struct cudaPos { + size_t x; + size_t y; + size_t z; +}; +struct cudaPos make_cudaPos(size_t x, size_t y, size_t z); + +struct cudaMemcpy3DParms { + cudaArray_t srcArray; + struct cudaPos srcPos; + struct cudaPitchedPtr srcPtr; + cudaArray_t dstArray; + struct cudaPos dstPos; + struct cudaPitchedPtr dstPtr; + struct cudaExtent extent; + enum cudaMemcpyKind kind; +}; +\endcode + * + * ::cudaMemcpy3D() copies data betwen two 3D objects. The source and + * destination objects may be in either host memory, device memory, or a CUDA + * array. The source, destination, extent, and kind of copy performed is + * specified by the ::cudaMemcpy3DParms struct which should be initialized to + * zero before use: +\code +cudaMemcpy3DParms myParms = {0}; +\endcode + * + * The struct passed to ::cudaMemcpy3D() must specify one of \p srcArray or + * \p srcPtr and one of \p dstArray or \p dstPtr. Passing more than one + * non-zero source or destination will cause ::cudaMemcpy3D() to return an + * error. + * + * The \p srcPos and \p dstPos fields are optional offsets into the source and + * destination objects and are defined in units of each object's elements. The + * element for a host or device pointer is assumed to be unsigned char. 
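+ *
+ * For illustration only (not part of the original documentation), a minimal
+ * sketch of a host-to-array copy; the host buffer \p h_data, the CUDA array
+ * \p devArray and the dimensions are assumptions of the sketch:
+ * \code
+ size_t width = 64, height = 64, depth = 64;              // in float elements
+ cudaMemcpy3DParms p = {0};
+ p.srcPtr   = make_cudaPitchedPtr(h_data, width * sizeof(float), width, height);
+ p.dstArray = devArray;                                   // CUDA array of float elements
+ p.extent   = make_cudaExtent(width, height, depth);      // in elements of the array
+ p.kind     = cudaMemcpyHostToDevice;
+ cudaMemcpy3D(&p);
+ \endcode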
+ * + * The \p extent field defines the dimensions of the transferred area in + * elements. If a CUDA array is participating in the copy, the extent is + * defined in terms of that array's elements. If no CUDA array is + * participating in the copy then the extents are defined in elements of + * unsigned char. + * + * The \p kind field defines the direction of the copy. It must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * For ::cudaMemcpyHostToHost or ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost + * passed as kind and cudaArray type passed as source or destination, if the kind + * implies cudaArray type to be present on the host, ::cudaMemcpy3D() will + * disregard that implication and silently correct the kind based on the fact that + * cudaArray type can only be present on the device. + * + * If the source and destination are both arrays, ::cudaMemcpy3D() will return + * an error if they do not have the same element size. + * + * The source and destination object may not overlap. If overlapping source + * and destination objects are specified, undefined behavior will result. + * + * The source object must entirely contain the region defined by \p srcPos + * and \p extent. The destination object must entirely contain the region + * defined by \p dstPos and \p extent. + * + * ::cudaMemcpy3D() returns an error if the pitch of \p srcPtr or \p dstPtr + * exceeds the maximum allowed. The pitch of a ::cudaPitchedPtr allocated + * with ::cudaMalloc3D() will always be valid. + * + * \param p - 3D memory copy parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaMemset3D, ::cudaMemcpy3DAsync, + * ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::make_cudaExtent, ::make_cudaPos, + * ::cuMemcpy3D + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy3D(const struct cudaMemcpy3DParms *p); + +/** + * \brief Copies memory between devices + * + * Perform a 3D memory copy according to the parameters specified in + * \p p. See the definition of the ::cudaMemcpy3DPeerParms structure + * for documentation of its parameters. + * + * Note that this function is synchronous with respect to the host only if + * the source or destination of the transfer is host memory. Note also + * that this copy is serialized with respect to all pending and future + * asynchronous work in to the current device, the copy's source device, + * and the copy's destination device (use ::cudaMemcpy3DPeerAsync to avoid + * this synchronization). 
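+ *
+ * For illustration only (not part of the original documentation), a minimal
+ * sketch of a peer copy between two linear allocations, \p d_src0 on device 0
+ * and \p d_dst1 on device 1; the pointers and sizes are assumptions of the
+ * sketch, and note that ::cudaMemcpy3DPeerParms carries no \p kind field since
+ * the copy is always device-to-device:
+ * \code
+ size_t widthBytes = 4096, height = 256, depth = 16;
+ cudaMemcpy3DPeerParms p = {0};
+ p.srcDevice = 0;
+ p.srcPtr    = make_cudaPitchedPtr(d_src0, widthBytes, widthBytes, height);
+ p.dstDevice = 1;
+ p.dstPtr    = make_cudaPitchedPtr(d_dst1, widthBytes, widthBytes, height);
+ p.extent    = make_cudaExtent(widthBytes, height, depth);  // no array involved: in bytes
+ cudaMemcpy3DPeer(&p);
+ \endcode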
+ * + * \param p - Parameters for the memory copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidPitchValue + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpy3DPeer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeer(const struct cudaMemcpy3DPeerParms *p); + +/** + * \brief Copies data between 3D objects + * +\code +struct cudaExtent { + size_t width; + size_t height; + size_t depth; +}; +struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d); + +struct cudaPos { + size_t x; + size_t y; + size_t z; +}; +struct cudaPos make_cudaPos(size_t x, size_t y, size_t z); + +struct cudaMemcpy3DParms { + cudaArray_t srcArray; + struct cudaPos srcPos; + struct cudaPitchedPtr srcPtr; + cudaArray_t dstArray; + struct cudaPos dstPos; + struct cudaPitchedPtr dstPtr; + struct cudaExtent extent; + enum cudaMemcpyKind kind; +}; +\endcode + * + * ::cudaMemcpy3DAsync() copies data betwen two 3D objects. The source and + * destination objects may be in either host memory, device memory, or a CUDA + * array. The source, destination, extent, and kind of copy performed is + * specified by the ::cudaMemcpy3DParms struct which should be initialized to + * zero before use: +\code +cudaMemcpy3DParms myParms = {0}; +\endcode + * + * The struct passed to ::cudaMemcpy3DAsync() must specify one of \p srcArray + * or \p srcPtr and one of \p dstArray or \p dstPtr. Passing more than one + * non-zero source or destination will cause ::cudaMemcpy3DAsync() to return an + * error. + * + * The \p srcPos and \p dstPos fields are optional offsets into the source and + * destination objects and are defined in units of each object's elements. The + * element for a host or device pointer is assumed to be unsigned char. + * For CUDA arrays, positions must be in the range [0, 2048) for any + * dimension. + * + * The \p extent field defines the dimensions of the transferred area in + * elements. If a CUDA array is participating in the copy, the extent is + * defined in terms of that array's elements. If no CUDA array is + * participating in the copy then the extents are defined in elements of + * unsigned char. + * + * The \p kind field defines the direction of the copy. It must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * For ::cudaMemcpyHostToHost or ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost + * passed as kind and cudaArray type passed as source or destination, if the kind + * implies cudaArray type to be present on the host, ::cudaMemcpy3DAsync() will + * disregard that implication and silently correct the kind based on the fact that + * cudaArray type can only be present on the device. + * + * If the source and destination are both arrays, ::cudaMemcpy3DAsync() will + * return an error if they do not have the same element size. + * + * The source and destination object may not overlap. If overlapping source + * and destination objects are specified, undefined behavior will result. 
+ * + * The source object must lie entirely within the region defined by \p srcPos + * and \p extent. The destination object must lie entirely within the region + * defined by \p dstPos and \p extent. + * + * ::cudaMemcpy3DAsync() returns an error if the pitch of \p srcPtr or + * \p dstPtr exceeds the maximum allowed. The pitch of a + * ::cudaPitchedPtr allocated with ::cudaMalloc3D() will always be valid. + * + * ::cudaMemcpy3DAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream + * is non-zero, the copy may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param p - 3D memory copy parameters + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaMemset3D, ::cudaMemcpy3D, + * ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::make_cudaExtent, ::make_cudaPos, + * ::cuMemcpy3DAsync + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream __dv(0)); + +/** + * \brief Copies memory between devices asynchronously. + * + * Perform a 3D memory copy according to the parameters specified in + * \p p. See the definition of the ::cudaMemcpy3DPeerParms structure + * for documentation of its parameters. + * + * \param p - Parameters for the memory copy + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidPitchValue + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpy3DPeerAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeerAsync(const struct cudaMemcpy3DPeerParms *p, cudaStream_t stream __dv(0)); + +/** + * \brief Gets free and total device memory + * + * Returns in \p *total the total amount of memory available to the current context. + * Returns in \p *free the amount of memory on the device that is free according to the OS. + * CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free. + * In a multi-tenant situation, the free estimate returned is prone to a race condition: a new + * allocation or free performed by a different process, or by a different thread in the same + * process, between the time free memory is estimated and the time it is reported will cause + * the reported free value to deviate from the actual free memory. + * + * The integrated GPU on Tegra shares memory with the CPU and other components + * of the SoC.
The free and total values returned by the API exclude + * the SWAP memory space maintained by the OS on some platforms. + * The OS may move some of the memory pages into the swap area as the GPU or + * CPU allocate or access memory. See the Tegra app note on how to calculate + * total and free memory on Tegra. + * + * \param free - Returned free memory in bytes + * \param total - Returned total memory in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cuMemGetInfo + */ +extern __host__ cudaError_t CUDARTAPI cudaMemGetInfo(size_t *free, size_t *total); + +/** + * \brief Gets info about the specified cudaArray + * + * Returns in \p *desc, \p *extent and \p *flags respectively, the type, shape + * and flags of \p array. + * + * Any of \p *desc, \p *extent and \p *flags may be specified as NULL. + * + * \param desc - Returned array type + * \param extent - Returned array shape. 2D arrays will have depth of zero + * \param flags - Returned array flags + * \param array - The ::cudaArray to get info for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cuArrayGetDescriptor, + * ::cuArray3DGetDescriptor + */ +extern __host__ cudaError_t CUDARTAPI cudaArrayGetInfo(struct cudaChannelFormatDesc *desc, struct cudaExtent *extent, unsigned int *flags, cudaArray_t array); + +/** + * \brief Gets a CUDA array plane from a CUDA array + * + * Returns in \p pPlaneArray a CUDA array that represents a single format plane + * of the CUDA array \p hArray. + * + * If \p planeIdx is greater than the maximum number of planes in this array or if the array does + * not have a multi-planar format, e.g. ::cudaChannelFormatKindNV12, then ::cudaErrorInvalidValue is returned. + * + * Note that if the \p hArray has format ::cudaChannelFormatKindNV12, then passing in 0 for \p planeIdx returns + * a CUDA array of the same size as \p hArray but with one 8-bit channel and ::cudaChannelFormatKindUnsigned as its format kind. + * If 1 is passed for \p planeIdx, then the returned CUDA array has half the height and width + * of \p hArray with two 8-bit channels and ::cudaChannelFormatKindUnsigned as its format kind. + * + * \param pPlaneArray - Returned CUDA array referenced by the \p planeIdx + * \param hArray - CUDA array + * \param planeIdx - Plane index + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cuArrayGetPlane + */ +extern __host__ cudaError_t CUDARTAPI cudaArrayGetPlane(cudaArray_t *pPlaneArray, cudaArray_t hArray, unsigned int planeIdx); + +/** + * \brief Returns the memory requirements of a CUDA array + * + * Returns the memory requirements of a CUDA array in \p memoryRequirements. + * If the CUDA array is not allocated with the flag ::cudaArrayDeferredMapping, + * ::cudaErrorInvalidValue will be returned. + * + * The returned value in ::cudaArrayMemoryRequirements::size + * represents the total size of the CUDA array. + * The returned value in ::cudaArrayMemoryRequirements::alignment + * represents the alignment necessary for mapping the CUDA array.
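+ *
+ * For illustration only (not part of the original documentation), a minimal
+ * sketch that queries the requirements of an existing CUDA array \p array
+ * created with the ::cudaArrayDeferredMapping flag on device 0; the array
+ * itself is an assumption of the sketch:
+ * \code
+ struct cudaArrayMemoryRequirements req;
+ if (cudaArrayGetMemoryRequirements(&req, array, 0) == cudaSuccess) {
+     // req.size      - total physical backing size needed for the array
+     // req.alignment - alignment required when mapping it via cuMemMapArrayAsync
+ }
+ \endcode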
+ * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] memoryRequirements - Pointer to ::cudaArrayMemoryRequirements + * \param[in] array - CUDA array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cudaMipmappedArrayGetMemoryRequirements + */ +extern __host__ cudaError_t CUDARTAPI cudaArrayGetMemoryRequirements(struct cudaArrayMemoryRequirements *memoryRequirements, cudaArray_t array, int device); + +/** + * \brief Returns the memory requirements of a CUDA mipmapped array + * + * Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements + * If the CUDA mipmapped array is not allocated with flag ::cudaArrayDeferredMapping + * ::cudaErrorInvalidValue will be returned. + * + * The returned value in ::cudaArrayMemoryRequirements::size + * represents the total size of the CUDA mipmapped array. + * The returned value in ::cudaArrayMemoryRequirements::alignment + * represents the alignment necessary for mapping the CUDA mipmapped + * array. + * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] memoryRequirements - Pointer to ::cudaArrayMemoryRequirements + * \param[in] mipmap - CUDA mipmapped array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cudaArrayGetMemoryRequirements + */ +extern __host__ cudaError_t CUDARTAPI cudaMipmappedArrayGetMemoryRequirements(struct cudaArrayMemoryRequirements *memoryRequirements, cudaMipmappedArray_t mipmap, int device); + +/** + * \brief Returns the layout properties of a sparse CUDA array + * + * Returns the layout properties of a sparse CUDA array in \p sparseProperties. + * If the CUDA array is not allocated with flag ::cudaArraySparse + * ::cudaErrorInvalidValue will be returned. + * + * If the returned value in ::cudaArraySparseProperties::flags contains ::cudaArraySparsePropertiesSingleMipTail, + * then ::cudaArraySparseProperties::miptailSize represents the total size of the array. Otherwise, it will be zero. + * Also, the returned value in ::cudaArraySparseProperties::miptailFirstLevel is always zero. + * Note that the \p array must have been allocated using ::cudaMallocArray or ::cudaMalloc3DArray. For CUDA arrays obtained + * using ::cudaMipmappedArrayGetLevel, ::cudaErrorInvalidValue will be returned. Instead, ::cudaMipmappedArrayGetSparseProperties + * must be used to obtain the sparse properties of the entire CUDA mipmapped array to which \p array belongs to. + * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] sparseProperties - Pointer to return the ::cudaArraySparseProperties + * \param[in] array - The CUDA array to get the sparse properties of + * + * \sa + * ::cudaMipmappedArrayGetSparseProperties, + * ::cuMemMapArrayAsync + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaArrayGetSparseProperties(struct cudaArraySparseProperties *sparseProperties, cudaArray_t array); +#endif + +/** + * \brief Returns the layout properties of a sparse CUDA mipmapped array + * + * Returns the sparse array layout properties in \p sparseProperties. + * If the CUDA mipmapped array is not allocated with flag ::cudaArraySparse + * ::cudaErrorInvalidValue will be returned. + * + * For non-layered CUDA mipmapped arrays, ::cudaArraySparseProperties::miptailSize returns the + * size of the mip tail region. 
The mip tail region includes all mip levels whose width, height or depth + * is less than that of the tile. + * For layered CUDA mipmapped arrays, if ::cudaArraySparseProperties::flags contains ::cudaArraySparsePropertiesSingleMipTail, + * then ::cudaArraySparseProperties::miptailSize specifies the size of the mip tail of all layers combined. + * Otherwise, ::cudaArraySparseProperties::miptailSize specifies mip tail size per layer. + * The returned value of ::cudaArraySparseProperties::miptailFirstLevel is valid only if ::cudaArraySparseProperties::miptailSize is non-zero. + * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] sparseProperties - Pointer to return ::cudaArraySparseProperties + * \param[in] mipmap - The CUDA mipmapped array to get the sparse properties of + * + * \sa + * ::cudaArrayGetSparseProperties, + * ::cuMemMapArrayAsync + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaMipmappedArrayGetSparseProperties(struct cudaArraySparseProperties *sparseProperties, cudaMipmappedArray_t mipmap); +#endif + +/** + * \brief Copies data between host and device + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * memory area pointed to by \p dst, where \p kind specifies the direction + * of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. Calling + * ::cudaMemcpy() with dst and src pointers that do not match the direction of + * the copy results in an undefined behavior. + * + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_init_rt + * \note_callback + * + * \note_sync + * \note_memcpy + * + * \sa ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyDtoH, + * ::cuMemcpyHtoD, + * ::cuMemcpyDtoD, + * ::cuMemcpy + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind); + +/** + * \brief Copies memory between two devices + * + * Copies memory from one device to memory on another device. \p dst is the + * base device pointer of the destination memory and \p dstDevice is the + * destination device. \p src is the base device pointer of the source memory + * and \p srcDevice is the source device. \p count specifies the number of bytes + * to copy. + * + * Note that this function is asynchronous with respect to the host, but + * serialized with respect all pending and future asynchronous work in to the + * current device, \p srcDevice, and \p dstDevice (use ::cudaMemcpyPeerAsync + * to avoid this synchronization). 
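+ *
+ * For illustration only (not part of the original documentation), a minimal
+ * sketch that copies a buffer from device 0 to device 1; the buffer size is
+ * arbitrary and error checking is omitted:
+ * \code
+ size_t bytes = 1 << 20;
+ void *d_src0 = NULL, *d_dst1 = NULL;
+
+ cudaSetDevice(0);
+ cudaMalloc(&d_src0, bytes);
+ cudaSetDevice(1);
+ cudaMalloc(&d_dst1, bytes);
+
+ cudaMemcpyPeer(d_dst1, 1, d_src0, 0, bytes);   // dst, dstDevice, src, srcDevice, count
+ \endcode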
+ * + * \param dst - Destination device pointer + * \param dstDevice - Destination device + * \param src - Source device pointer + * \param srcDevice - Source device + * \param count - Size of memory copy in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpyPeer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyPeer(void *dst, int dstDevice, const void *src, int srcDevice, size_t count); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. \p dpitch and + * \p spitch are the widths in memory in bytes of the 2D arrays pointed to by + * \p dst and \p src, including any padding added to the end of each row. The + * memory areas may not overlap. \p width must not exceed either \p dpitch or + * \p spitch. Calling ::cudaMemcpy2D() with \p dst and \p src pointers that do + * not match the direction of the copy results in an undefined behavior. + * ::cudaMemcpy2D() returns an error if \p dpitch or \p spitch exceeds + * the maximum allowed. + * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the CUDA array \p dst starting at + * \p hOffset rows and \p wOffset bytes from the upper left corner, + * where \p kind specifies the direction of the copy, and must be one + * of ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. 
+ * \p spitch is the width in memory in bytes of the 2D array pointed to by + * \p src, including any padding added to the end of each row. \p wOffset + + * \p width must not exceed the width of the CUDA array \p dst. \p width must + * not exceed \p spitch. ::cudaMemcpy2DToArray() returns an error if \p spitch + * exceeds the maximum allowed. + * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the CUDA + * array \p src starting at \p hOffset rows and \p wOffset bytes from the + * upper left corner to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. \p dpitch is the + * width in memory in bytes of the 2D array pointed to by \p dst, including any + * padding added to the end of each row. \p wOffset + \p width must not exceed + * the width of the CUDA array \p src. \p width must not exceed \p dpitch. + * ::cudaMemcpy2DFromArray() returns an error if \p dpitch exceeds the maximum + * allowed. 
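+ *
+ * For illustration only (not part of the original documentation), a minimal
+ * sketch that copies a \p width x \p height region of a CUDA array of float
+ * elements into a tightly packed host buffer; \p srcArray and the dimensions
+ * are assumptions of the sketch:
+ * \code
+ size_t width = 256, height = 128;                    // in float elements
+ float *h_dst = (float *)malloc(width * height * sizeof(float));
+ size_t dpitch = width * sizeof(float);               // tightly packed rows
+
+ cudaMemcpy2DFromArray(h_dst, dpitch, srcArray,
+                       0, 0,                          // wOffset in bytes, hOffset in rows
+                       width * sizeof(float),         // transfer width in bytes
+                       height,
+                       cudaMemcpyDeviceToHost);
+ \endcode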
+ * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArray(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the CUDA + * array \p src starting at \p hOffsetSrc rows and \p wOffsetSrc bytes from the + * upper left corner to the CUDA array \p dst starting at \p hOffsetDst rows + * and \p wOffsetDst bytes from the upper left corner, where \p kind + * specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p wOffsetDst + \p width must not exceed the width of the CUDA array \p dst. + * \p wOffsetSrc + \p width must not exceed the width of the CUDA array \p src. 
+ * + * \param dst - Destination memory address + * \param wOffsetDst - Destination starting X offset (columns in bytes) + * \param hOffsetDst - Destination starting Y offset (rows) + * \param src - Source memory address + * \param wOffsetSrc - Source starting X offset (columns in bytes) + * \param hOffsetSrc - Source starting Y offset (rows) + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + +/** + * \brief Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area pointed to by \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy, + * ::cuMemcpyHtoD, + * ::cuMemcpyDtoD + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbol(const void *symbol, const void *src, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyHostToDevice)); + +/** + * \brief Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. 
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy, + * ::cuMemcpyDtoH, + * ::cuMemcpyDtoD + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbol(void *dst, const void *symbol, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToHost)); + + +/** + * \brief Copies data between host and device + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * memory area pointed to by \p dst, where \p kind specifies the + * direction of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * The memory areas may not overlap. Calling ::cudaMemcpyAsync() with \p dst and + * \p src pointers that do not match the direction of the copy results in an + * undefined behavior. + * + * ::cudaMemcpyAsync() is asynchronous with respect to the host, so the call + * may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and the \p stream is + * non-zero, the copy may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. 
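+ *
+ * For illustration only (not part of the original documentation), a minimal
+ * sketch of an asynchronous host-to-device copy; the host buffer is allocated
+ * as page-locked memory so the copy can actually overlap with other work, and
+ * the buffer size is an assumption of the sketch:
+ * \code
+ size_t bytes = 1 << 20;
+ float *h_buf = NULL, *d_buf = NULL;
+ cudaStream_t stream;
+
+ cudaMallocHost((void **)&h_buf, bytes);   // pinned host memory
+ cudaMalloc((void **)&d_buf, bytes);
+ cudaStreamCreate(&stream);
+
+ cudaMemcpyAsync(d_buf, h_buf, bytes, cudaMemcpyHostToDevice, stream);
+ // ... launch kernels in the same stream that consume d_buf ...
+ cudaStreamSynchronize(stream);
+ \endcode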
+ * + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAsync, + * ::cuMemcpyDtoHAsync, + * ::cuMemcpyHtoDAsync, + * ::cuMemcpyDtoDAsync + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies memory between two devices asynchronously. + * + * Copies memory from one device to memory on another device. \p dst is the + * base device pointer of the destination memory and \p dstDevice is the + * destination device. \p src is the base device pointer of the source memory + * and \p srcDevice is the source device. \p count specifies the number of bytes + * to copy. + * + * Note that this function is asynchronous with respect to the host and all work + * on other devices. + * + * \param dst - Destination device pointer + * \param dstDevice - Destination device + * \param src - Source device pointer + * \param srcDevice - Source device + * \param count - Size of memory copy in bytes + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpyPeerAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice, size_t count, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p dpitch and \p spitch are the widths in memory in bytes of the 2D arrays + * pointed to by \p dst and \p src, including any padding added to the end of + * each row. The memory areas may not overlap. \p width must not exceed either + * \p dpitch or \p spitch. + * + * Calling ::cudaMemcpy2DAsync() with \p dst and \p src pointers that do not + * match the direction of the copy results in an undefined behavior. + * ::cudaMemcpy2DAsync() returns an error if \p dpitch or \p spitch is greater + * than the maximum allowed. + * + * ::cudaMemcpy2DAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. 
The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and + * \p stream is non-zero, the copy may overlap with operations in other + * streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2DAsync + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the CUDA array \p dst starting at \p hOffset + * rows and \p wOffset bytes from the upper left corner, where \p kind specifies + * the direction of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p spitch is the width in memory in bytes of the 2D array pointed to by + * \p src, including any padding added to the end of each row. \p wOffset + + * \p width must not exceed the width of the CUDA array \p dst. \p width must + * not exceed \p spitch. ::cudaMemcpy2DToArrayAsync() returns an error if + * \p spitch exceeds the maximum allowed. + * + * ::cudaMemcpy2DToArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and + * \p stream is non-zero, the copy may overlap with operations in other + * streams. 
+ * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2DAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the CUDA + * array \p src starting at \p hOffset rows and \p wOffset bytes from the + * upper left corner to the memory area pointed to by \p dst, + * where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p dpitch is the width in memory in bytes of the 2D + * array pointed to by \p dst, including any padding added to the end of each + * row. \p wOffset + \p width must not exceed the width of the CUDA array + * \p src. \p width must not exceed \p dpitch. ::cudaMemcpy2DFromArrayAsync() + * returns an error if \p dpitch exceeds the maximum allowed. + * + * ::cudaMemcpy2DFromArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream is + * non-zero, the copy may overlap with operations in other streams. 
+ * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2DAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArrayAsync(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area pointed to by \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy + * may overlap with operations in other streams. 
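+ *
+ * A minimal sketch, assuming a hypothetical __constant__ symbol \p coeffs and a
+ * stream \p stream created elsewhere:
+ * \code
+ __constant__ float coeffs[16];            // device symbol, declared at file scope
+
+ // in host code: upload 16 floats into the symbol, starting at offset 0
+ float h_coeffs[16] = { 1.0f };
+ cudaMemcpyToSymbolAsync(coeffs, h_coeffs, sizeof(h_coeffs), 0,
+                         cudaMemcpyHostToDevice, stream);
+ * \endcode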
+ * + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAsync, + * ::cuMemcpyHtoDAsync, + * ::cuMemcpyDtoDAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbolAsync(const void *symbol, const void *src, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that resides in + * global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap + * with operations in other streams. + * + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, + * ::cuMemcpyAsync, + * ::cuMemcpyDtoHAsync, + * ::cuMemcpyDtoDAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbolAsync(void *dst, const void *symbol, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + + +/** + * \brief Initializes or sets device memory to a value + * + * Fills the first \p count bytes of the memory area pointed to by \p devPtr + * with the constant byte value \p value. 
+ * + * Note that this function is asynchronous with respect to the host unless + * \p devPtr refers to pinned host memory. + * + * \param devPtr - Pointer to device memory + * \param value - Value to set for each byte of specified memory + * \param count - Size in bytes to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_init_rt + * \note_callback + * + * \sa + * ::cuMemsetD8, + * ::cuMemsetD16, + * ::cuMemsetD32 + */ +extern __host__ cudaError_t CUDARTAPI cudaMemset(void *devPtr, int value, size_t count); + +/** + * \brief Initializes or sets device memory to a value + * + * Sets to the specified value \p value a matrix (\p height rows of \p width + * bytes each) pointed to by \p dstPtr. \p pitch is the width in bytes of the + * 2D array pointed to by \p dstPtr, including any padding added to the end + * of each row. This function performs fastest when the pitch is one that has + * been passed back by ::cudaMallocPitch(). + * + * Note that this function is asynchronous with respect to the host unless + * \p devPtr refers to pinned host memory. + * + * \param devPtr - Pointer to 2D device memory + * \param pitch - Pitch in bytes of 2D device memory(Unused if \p height is 1) + * \param value - Value to set for each byte of specified memory + * \param width - Width of matrix set (columns in bytes) + * \param height - Height of matrix set (rows) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset3D, ::cudaMemsetAsync, + * ::cudaMemset2DAsync, ::cudaMemset3DAsync, + * ::cuMemsetD2D8, + * ::cuMemsetD2D16, + * ::cuMemsetD2D32 + */ +extern __host__ cudaError_t CUDARTAPI cudaMemset2D(void *devPtr, size_t pitch, int value, size_t width, size_t height); + +/** + * \brief Initializes or sets device memory to a value + * + * Initializes each element of a 3D array to the specified value \p value. + * The object to initialize is defined by \p pitchedDevPtr. The \p pitch field + * of \p pitchedDevPtr is the width in memory in bytes of the 3D array pointed + * to by \p pitchedDevPtr, including any padding added to the end of each row. + * The \p xsize field specifies the logical width of each row in bytes, while + * the \p ysize field specifies the height of each 2D slice in rows. + * The \p pitch field of \p pitchedDevPtr is ignored when \p height and \p depth + * are both equal to 1. + * + * The extents of the initialized region are specified as a \p width in bytes, + * a \p height in rows, and a \p depth in slices. + * + * Extents with \p width greater than or equal to the \p xsize of + * \p pitchedDevPtr may perform significantly faster than extents narrower + * than the \p xsize. Secondarily, extents with \p height equal to the + * \p ysize of \p pitchedDevPtr will perform faster than when the \p height is + * shorter than the \p ysize. + * + * This function performs fastest when the \p pitchedDevPtr has been allocated + * by ::cudaMalloc3D(). + * + * Note that this function is asynchronous with respect to the host unless + * \p pitchedDevPtr refers to pinned host memory. 
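+ *
+ * A minimal sketch: zeroing a hypothetical 256x256x64 float volume allocated with
+ * ::cudaMalloc3D (the \p extent width is expressed in bytes):
+ * \code
+ cudaExtent extent = make_cudaExtent(256 * sizeof(float), 256, 64);
+ cudaPitchedPtr volume;
+ cudaMalloc3D(&volume, extent);     // the allocation supplies the pitch used below
+ cudaMemset3D(volume, 0, extent);   // clear the entire volume
+ * \endcode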
+ * + * \param pitchedDevPtr - Pointer to pitched device memory + * \param value - Value to set for each byte of specified memory + * \param extent - Size parameters for where to set device memory (\p width field in bytes) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, + * ::cudaMemsetAsync, ::cudaMemset2DAsync, ::cudaMemset3DAsync, + * ::cudaMalloc3D, ::make_cudaPitchedPtr, + * ::make_cudaExtent + */ +extern __host__ cudaError_t CUDARTAPI cudaMemset3D(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent); + +/** + * \brief Initializes or sets device memory to a value + * + * Fills the first \p count bytes of the memory area pointed to by \p devPtr + * with the constant byte value \p value. + * + * ::cudaMemsetAsync() is asynchronous with respect to the host, so + * the call may return before the memset is complete. The operation can optionally + * be associated to a stream by passing a non-zero \p stream argument. + * If \p stream is non-zero, the operation may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param devPtr - Pointer to device memory + * \param value - Value to set for each byte of specified memory + * \param count - Size in bytes to set + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D, + * ::cudaMemset2DAsync, ::cudaMemset3DAsync, + * ::cuMemsetD8Async, + * ::cuMemsetD16Async, + * ::cuMemsetD32Async + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream __dv(0)); + +/** + * \brief Initializes or sets device memory to a value + * + * Sets to the specified value \p value a matrix (\p height rows of \p width + * bytes each) pointed to by \p dstPtr. \p pitch is the width in bytes of the + * 2D array pointed to by \p dstPtr, including any padding added to the end + * of each row. This function performs fastest when the pitch is one that has + * been passed back by ::cudaMallocPitch(). + * + * ::cudaMemset2DAsync() is asynchronous with respect to the host, so + * the call may return before the memset is complete. The operation can optionally + * be associated to a stream by passing a non-zero \p stream argument. + * If \p stream is non-zero, the operation may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. 
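+ *
+ * A minimal sketch, assuming a stream \p stream created elsewhere: clearing a
+ * pitched 640x480 float image in stream order:
+ * \code
+ float *d_img;
+ size_t pitch;
+ cudaMallocPitch((void **)&d_img, &pitch, 640 * sizeof(float), 480);
+ cudaMemset2DAsync(d_img, pitch, 0, 640 * sizeof(float), 480, stream);
+ * \endcode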
+ * + * \param devPtr - Pointer to 2D device memory + * \param pitch - Pitch in bytes of 2D device memory(Unused if \p height is 1) + * \param value - Value to set for each byte of specified memory + * \param width - Width of matrix set (columns in bytes) + * \param height - Height of matrix set (rows) + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D, + * ::cudaMemsetAsync, ::cudaMemset3DAsync, + * ::cuMemsetD2D8Async, + * ::cuMemsetD2D16Async, + * ::cuMemsetD2D32Async + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream __dv(0)); + +/** + * \brief Initializes or sets device memory to a value + * + * Initializes each element of a 3D array to the specified value \p value. + * The object to initialize is defined by \p pitchedDevPtr. The \p pitch field + * of \p pitchedDevPtr is the width in memory in bytes of the 3D array pointed + * to by \p pitchedDevPtr, including any padding added to the end of each row. + * The \p xsize field specifies the logical width of each row in bytes, while + * the \p ysize field specifies the height of each 2D slice in rows. + * The \p pitch field of \p pitchedDevPtr is ignored when \p height and \p depth + * are both equal to 1. + * + * The extents of the initialized region are specified as a \p width in bytes, + * a \p height in rows, and a \p depth in slices. + * + * Extents with \p width greater than or equal to the \p xsize of + * \p pitchedDevPtr may perform significantly faster than extents narrower + * than the \p xsize. Secondarily, extents with \p height equal to the + * \p ysize of \p pitchedDevPtr will perform faster than when the \p height is + * shorter than the \p ysize. + * + * This function performs fastest when the \p pitchedDevPtr has been allocated + * by ::cudaMalloc3D(). + * + * ::cudaMemset3DAsync() is asynchronous with respect to the host, so + * the call may return before the memset is complete. The operation can optionally + * be associated to a stream by passing a non-zero \p stream argument. + * If \p stream is non-zero, the operation may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param pitchedDevPtr - Pointer to pitched device memory + * \param value - Value to set for each byte of specified memory + * \param extent - Size parameters for where to set device memory (\p width field in bytes) + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D, + * ::cudaMemsetAsync, ::cudaMemset2DAsync, + * ::cudaMalloc3D, ::make_cudaPitchedPtr, + * ::make_cudaExtent + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream __dv(0)); + +/** + * \brief Finds the address associated with a CUDA symbol + * + * Returns in \p *devPtr the address of symbol \p symbol on the device. + * \p symbol is a variable that resides in global or constant memory space. 
+ * If \p symbol cannot be found, or if \p symbol is not declared in the + * global or constant memory space, \p *devPtr is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param devPtr - Return device pointer associated with symbol + * \param symbol - Device symbol address + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)", + * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)", + * ::cuModuleGetGlobal + */ +extern __host__ cudaError_t CUDARTAPI cudaGetSymbolAddress(void **devPtr, const void *symbol); + +/** + * \brief Finds the size of the object associated with a CUDA symbol + * + * Returns in \p *size the size of symbol \p symbol. \p symbol is a variable that + * resides in global or constant memory space. If \p symbol cannot be found, or + * if \p symbol is not declared in global or constant memory space, \p *size is + * unchanged and the error ::cudaErrorInvalidSymbol is returned. + * + * \param size - Size of object associated with symbol + * \param symbol - Device symbol address + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)", + * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)", + * ::cuModuleGetGlobal + */ +extern __host__ cudaError_t CUDARTAPI cudaGetSymbolSize(size_t *size, const void *symbol); + +/** + * \brief Prefetches memory to the specified destination device + * + * Prefetches memory to the specified destination device. \p devPtr is the + * base device pointer of the memory to be prefetched and \p dstDevice is the + * destination device. \p count specifies the number of bytes to copy. \p stream + * is the stream in which the operation is enqueued. The memory range must refer + * to managed memory allocated via ::cudaMallocManaged or declared via __managed__ variables. + * + * Passing in cudaCpuDeviceId for \p dstDevice will prefetch the data to host memory. If + * \p dstDevice is a GPU, then the device attribute ::cudaDevAttrConcurrentManagedAccess + * must be non-zero. Additionally, \p stream must be associated with a device that has a + * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * + * The start address and end address of the memory range will be rounded down and rounded up + * respectively to be aligned to CPU page size before the prefetch operation is enqueued + * in the stream. + * + * If no physical memory has been allocated for this region, then this memory region + * will be populated and mapped on the destination device. If there's insufficient + * memory to prefetch the desired region, the Unified Memory driver may evict pages from other + * ::cudaMallocManaged allocations to host memory in order to make room. Device memory + * allocated using ::cudaMalloc or ::cudaMallocArray will not be evicted. + * + * By default, any mappings to the previous location of the migrated pages are removed and + * mappings for the new location are only setup on \p dstDevice. 
The exact behavior however + * also depends on the settings applied to this memory range via ::cudaMemAdvise as described + * below: + * + * If ::cudaMemAdviseSetReadMostly was set on any subset of this memory range, + * then that subset will create a read-only copy of the pages on \p dstDevice. + * + * If ::cudaMemAdviseSetPreferredLocation was called on any subset of this memory + * range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the + * preferred location of any pages in the memory range. + * + * If ::cudaMemAdviseSetAccessedBy was called on any subset of this memory range, + * then mappings to those pages from all the appropriate processors are updated to + * refer to the new location if establishing such a mapping is possible. Otherwise, + * those mappings are cleared. + * + * Note that this API is not required for functionality and only serves to improve performance + * by allowing the application to migrate data to a suitable location before it is accessed. + * Memory accesses to this range are always coherent and are allowed even when the data is + * actively being migrated. + * + * Note that this function is asynchronous with respect to the host and all work + * on other devices. + * + * \param devPtr - Pointer to be prefetched + * \param count - Size in bytes + * \param dstDevice - Destination device to prefetch to + * \param stream - Stream to enqueue prefetch operation + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, + * ::cudaMemcpy3DPeerAsync, ::cudaMemAdvise, + * ::cuMemPrefetchAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPrefetchAsync(const void *devPtr, size_t count, int dstDevice, cudaStream_t stream __dv(0)); + +/** + * \brief Advise about the usage of a given memory range + * + * Advise the Unified Memory subsystem about the usage pattern for the memory range + * starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory + * range will be rounded down and rounded up respectively to be aligned to CPU page size before the + * advice is applied. The memory range must refer to managed memory allocated via ::cudaMallocManaged + * or declared via __managed__ variables. The memory range could also refer to system-allocated pageable + * memory provided it represents a valid, host-accessible region of memory and all additional constraints + * imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable + * memory range results in an error being returned. + * + * The \p advice parameter can take the following values: + * - ::cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read + * from and only occasionally written to. Any read accesses from any processor to this region will create a + * read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cudaMemPrefetchAsync + * is called on this region, it will create a read-only copy of the data on the destination processor. + * If any processor writes to this region, all copies of the corresponding page will be invalidated + * except for the one where the write occurred. The \p device argument is ignored for this advice. 
+ * Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU
+ * that has a non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
+ * Also, if a context is created on a device that does not have the device attribute
+ * ::cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until
+ * all such contexts are destroyed.
+ * If the memory region refers to valid system-allocated pageable memory, then the accessing device must
+ * have a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccess for a read-only
+ * copy to be created on that device. Note however that if the accessing device also has a non-zero value for the
+ * device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice
+ * will not create a read-only copy when that device accesses this memory region.
+ *
+ * - ::cudaMemAdviseUnsetReadMostly: Undoes the effect of ::cudaMemAdviseSetReadMostly and also prevents the
+ * Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated
+ * copies of the data will be collapsed into a single copy. The location for the collapsed
+ * copy will be the preferred location if the page has a preferred location and one of the read-duplicated
+ * copies was resident at that location. Otherwise, the location chosen is arbitrary.
+ *
+ * - ::cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the
+ * data to be the memory belonging to \p device. Passing in cudaCpuDeviceId for \p device sets the
+ * preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the
+ * device attribute ::cudaDevAttrConcurrentManagedAccess. Setting the preferred location
+ * does not cause data to migrate to that location immediately. Instead, it guides the migration policy
+ * when a fault occurs on that memory region. If the data is already in its preferred location and the
+ * faulting processor can establish a mapping without requiring the data to be migrated, then
+ * data migration will be avoided. On the other hand, if the data is not in its preferred location
+ * or if a direct mapping cannot be established, then it will be migrated to the processor accessing
+ * it. It is important to note that setting the preferred location does not prevent data prefetching
+ * done using ::cudaMemPrefetchAsync.
+ * Having a preferred location can override the page thrash detection and resolution logic in the Unified
+ * Memory driver. Normally, if a page is detected to be constantly thrashing between, for example, host and device
+ * memory, the page may eventually be pinned to host memory by the Unified Memory driver. But
+ * if the preferred location is set as device memory, then the page will continue to thrash indefinitely.
+ * If ::cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the
+ * policies associated with that advice will override the policies of this advice, unless read accesses from
+ * \p device will not result in a read-only copy being created on that device as outlined in the description for
+ * the advice ::cudaMemAdviseSetReadMostly.
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
+ * value for the device attribute ::cudaDevAttrPageableMemoryAccess.
+ * Additionally, if \p device has a non-zero value for the device attribute
+ * ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,
+ * then this call has no effect. Note however that this behavior may change in the future.
+ *
+ * - ::cudaMemAdviseUnsetPreferredLocation: Undoes the effect of ::cudaMemAdviseSetPreferredLocation
+ * and changes the preferred location to none.
+ *
+ * - ::cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by \p device.
+ * Passing in ::cudaCpuDeviceId for \p device will set the advice for the CPU. If \p device is a GPU, then
+ * the device attribute ::cudaDevAttrConcurrentManagedAccess must be non-zero.
+ * This advice does not cause data migration and has no impact on the location of the data per se. Instead,
+ * it causes the data to always be mapped in the specified processor's page tables, as long as the
+ * location of the data permits a mapping to be established. If the data gets migrated for any reason,
+ * the mappings are updated accordingly.
+ * This advice is recommended in scenarios where data locality is not important, but avoiding faults is.
+ * Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the
+ * data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data
+ * over to the other GPUs is not as important because the accesses are infrequent and the overhead of
+ * migration may be too high. But preventing faults can still help improve performance, and so having
+ * a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated
+ * to host memory because the CPU typically cannot access device memory directly. Any GPU that had the
+ * ::cudaMemAdviseSetAccessedBy flag set for this data will now have its mapping updated to point to the
+ * page in host memory.
+ * If ::cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the
+ * policies associated with that advice will override the policies of this advice. Additionally, if the
+ * preferred location of this memory region or any subset of it is also \p device, then the policies
+ * associated with ::cudaMemAdviseSetPreferredLocation will override the policies of this advice.
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
+ * value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \p device has
+ * a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,
+ * then this call has no effect.
+ *
+ * - ::cudaMemAdviseUnsetAccessedBy: Undoes the effect of ::cudaMemAdviseSetAccessedBy. Any mappings to
+ * the data from \p device may be removed at any time, causing accesses to result in non-fatal page faults.
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
+ * value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \p device has
+ * a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,
+ * then this call has no effect.
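+ *
+ * A minimal sketch combining advice with an explicit prefetch for read-mostly
+ * managed data used by device 0; the stream \p stream is hypothetical and assumed
+ * to have been created elsewhere:
+ * \code
+ float *data;
+ size_t bytes = 1 << 20;                                     // 1 MiB of managed memory
+ cudaMallocManaged((void **)&data, bytes, cudaMemAttachGlobal);
+ // ... initialize data on the host ...
+ cudaMemAdvise(data, bytes, cudaMemAdviseSetReadMostly, 0);  // device arg ignored here
+ cudaMemAdvise(data, bytes, cudaMemAdviseSetAccessedBy, 0);  // keep device 0 mapped
+ cudaMemPrefetchAsync(data, bytes, 0, stream);               // migrate before first use
+ * \endcode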
+ *
+ * \param devPtr - Pointer to memory to set the advice for
+ * \param count - Size in bytes of the memory range
+ * \param advice - Advice to be applied for the specified memory range
+ * \param device - Device to apply the advice for
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidDevice
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync,
+ * ::cudaMemcpy3DPeerAsync, ::cudaMemPrefetchAsync,
+ * ::cuMemAdvise
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemAdvise(const void *devPtr, size_t count, enum cudaMemoryAdvise advice, int device);
+
+/**
+ * \brief Query an attribute of a given memory range
+ *
+ * Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The
+ * memory range must refer to managed memory allocated via ::cudaMallocManaged or declared via
+ * __managed__ variables.
+ *
+ * The \p attribute parameter can take the following values:
+ * - ::cudaMemRangeAttributeReadMostly: If this attribute is specified, \p data will be interpreted
+ * as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given
+ * memory range have read-duplication enabled, or 0 otherwise.
+ * - ::cudaMemRangeAttributePreferredLocation: If this attribute is specified, \p data will be
+ * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device
+ * id if all pages in the memory range have that GPU as their preferred location, or it will be cudaCpuDeviceId
+ * if all pages in the memory range have the CPU as their preferred location, or it will be cudaInvalidDeviceId
+ * if either all the pages don't have the same preferred location or some of the pages don't have a
+ * preferred location at all. Note that the actual location of the pages in the memory range at the time of
+ * the query may be different from the preferred location.
+ * - ::cudaMemRangeAttributeAccessedBy: If this attribute is specified, \p data will be interpreted
+ * as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned
+ * will be a list of device ids that had ::cudaMemAdviseSetAccessedBy set for that entire memory range.
+ * If any device does not have that advice set for the entire memory range, that device will not be included.
+ * If \p data is larger than the number of devices that have that advice set for that memory range,
+ * cudaInvalidDeviceId will be returned in all the extra space provided. For example, if \p dataSize is 12
+ * (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be
+ * { 0, cudaInvalidDeviceId, cudaInvalidDeviceId }. If \p data is smaller than the number of devices that have
+ * that advice set, then only as many devices will be returned as can fit in the array. There is no
+ * guarantee on which specific devices will be returned, however.
+ * - ::cudaMemRangeAttributeLastPrefetchLocation: If this attribute is specified, \p data will be
+ * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location
+ * to which all pages in the memory range were prefetched explicitly via ::cudaMemPrefetchAsync. This will either be
+ * a GPU id or cudaCpuDeviceId, depending on whether the last location for prefetch was a GPU or the CPU,
+ * respectively.
+ * If any page in the memory range was never explicitly prefetched or if all pages were not
+ * prefetched to the same location, cudaInvalidDeviceId will be returned. Note that this simply returns the
+ * last location that the application requested to prefetch the memory range to. It gives no indication as to
+ * whether the prefetch operation to that location has completed or even begun.
+ *
+ * \param data - A pointer to a memory location where the result
+ * of the attribute query will be written to.
+ * \param dataSize - The size of \p data in bytes
+ * \param attribute - The attribute to query
+ * \param devPtr - Start of the range to query
+ * \param count - Size of the range to query
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemRangeGetAttributes, ::cudaMemPrefetchAsync,
+ * ::cudaMemAdvise,
+ * ::cuMemRangeGetAttribute
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemRangeGetAttribute(void *data, size_t dataSize, enum cudaMemRangeAttribute attribute, const void *devPtr, size_t count);
+
+/**
+ * \brief Query attributes of a given memory range.
+ *
+ * Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
+ * memory range must refer to managed memory allocated via ::cudaMallocManaged or declared via
+ * __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes
+ * entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries.
+ * The results of the query will be stored in \p data.
+ *
+ * The list of supported attributes is given below. Please refer to ::cudaMemRangeGetAttribute for
+ * attribute descriptions and restrictions.
+ *
+ * - ::cudaMemRangeAttributeReadMostly
+ * - ::cudaMemRangeAttributePreferredLocation
+ * - ::cudaMemRangeAttributeAccessedBy
+ * - ::cudaMemRangeAttributeLastPrefetchLocation
+ *
+ * \param data - A two-dimensional array containing pointers to memory
+ * locations where the result of each attribute query will be written to.
+ * \param dataSizes - Array containing the sizes of each result
+ * \param attributes - An array of attributes to query
+ * (numAttributes and the number of attributes in this array should match)
+ * \param numAttributes - Number of attributes to query
+ * \param devPtr - Start of the range to query
+ * \param count - Size of the range to query
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemRangeGetAttribute, ::cudaMemAdvise,
+ * ::cudaMemPrefetchAsync,
+ * ::cuMemRangeGetAttributes
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemRangeGetAttributes(void **data, size_t *dataSizes, enum cudaMemRangeAttribute *attributes, size_t numAttributes, const void *devPtr, size_t count);
+
+/** @} */ /* END CUDART_MEMORY */
+
+/**
+ * \defgroup CUDART_MEMORY_DEPRECATED Memory Management [DEPRECATED]
+ *
+ * ___MANBRIEF___ deprecated memory management functions of the CUDA runtime API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes deprecated memory management functions of the CUDA runtime
+ * application programming interface.
+ *
+ * Some functions have overloaded C++ API template versions documented separately in the
+ * \ref CUDART_HIGHLEVEL "C++ API Routines" module.
+ * + * @{ + */ + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * CUDA array \p dst starting at \p hOffset rows and \p wOffset bytes from + * the upper left corner, where \p kind specifies the direction + * of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyHtoA, + * ::cuMemcpyDtoA + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the CUDA array \p src starting at \p hOffset rows + * and \p wOffset bytes from the upper left corner to the memory area pointed to + * by \p dst, where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. 
+ * + * \param dst - Destination memory address + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAtoH, + * ::cuMemcpyAtoD + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyFromArray(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the CUDA array \p src starting at \p hOffsetSrc + * rows and \p wOffsetSrc bytes from the upper left corner to the CUDA array + * \p dst starting at \p hOffsetDst rows and \p wOffsetDst bytes from the upper + * left corner, where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. 
+ * + * \param dst - Destination memory address + * \param wOffsetDst - Destination starting X offset (columns in bytes) + * \param hOffsetDst - Destination starting Y offset (rows) + * \param src - Source memory address + * \param wOffsetSrc - Source starting X offset (columns in bytes) + * \param hOffsetSrc - Source starting Y offset (rows) + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAtoA + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * CUDA array \p dst starting at \p hOffset rows and \p wOffset bytes from + * the upper left corner, where \p kind specifies the + * direction of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyToArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If \p + * kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream + * is non-zero, the copy may overlap with operations in other streams. 
+ * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyHtoAAsync, + * ::cuMemcpy2DAsync + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the CUDA array \p src starting at \p hOffset rows + * and \p wOffset bytes from the upper left corner to the memory area pointed to + * by \p dst, where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyFromArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If \p + * kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream + * is non-zero, the copy may overlap with operations in other streams. 
+ * + * \param dst - Destination memory address + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAtoHAsync, + * ::cuMemcpy2DAsync + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyFromArrayAsync(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** @} */ /* END CUDART_MEMORY_DEPRECATED */ + +/** + * \defgroup CUDART_MEMORY_POOLS Stream Ordered Memory Allocator + * + * ___MANBRIEF___ Functions for performing allocation and free operations in stream order. + * Functions for controlling the behavior of the underlying allocator. + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * + * @{ + * + * \section CUDART_MEMORY_POOLS_overview overview + * + * The asynchronous allocator allows the user to allocate and free in stream order. + * All asynchronous accesses of the allocation must happen between + * the stream executions of the allocation and the free. If the memory is accessed + * outside of the promised stream order, a use before allocation / use after free error + * will cause undefined behavior. + * + * The allocator is free to reallocate the memory as long as it can guarantee + * that compliant memory accesses will not overlap temporally. + * The allocator may refer to internal stream ordering as well as inter-stream dependencies + * (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. + * The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + * + * \section CUDART_MEMORY_POOLS_support Supported Platforms + * + * Whether or not a device supports the integrated stream ordered memory allocator + * may be queried by calling ::cudaDeviceGetAttribute() with the device attribute + * ::cudaDevAttrMemoryPoolsSupported. + */ + +/** + * \brief Allocates memory with stream ordered semantics + * + * Inserts an allocation operation into \p hStream. + * A pointer to the allocated memory is returned immediately in *dptr. + * The allocation must not be accessed until the the allocation operation completes. + * The allocation comes from the memory pool associated with the stream's device. + * + * \note The default memory pool of a device contains device memory from that device. + * \note Basic stream ordering allows future work submitted into the same stream to use the allocation. + * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + * operation completes before work submitted in a separate stream runs. + * \note During stream capture, this function results in the creation of an allocation node. 
In this case, + * the allocation is owned by the graph instead of the memory pool. The memory pool's properties + * are used to set the node's creation parameters. + * + * \param[out] devPtr - Returned device pointer + * \param[in] size - Number of bytes to allocate + * \param[in] hStream - The stream establishing the stream ordering contract and the memory pool to allocate from + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported, + * ::cudaErrorOutOfMemory, + * \notefnerr + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cuMemAllocAsync, + * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream) "cudaMallocAsync (C++ API)", + * ::cudaMallocFromPoolAsync, ::cudaFreeAsync, ::cudaDeviceSetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolSetAccess, ::cudaMemPoolSetAttribute, ::cudaMemPoolGetAttribute + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocAsync(void **devPtr, size_t size, cudaStream_t hStream); + +/** + * \brief Frees memory with stream ordered semantics + * + * Inserts a free operation into \p hStream. + * The allocation must not be accessed after stream execution reaches the free. + * After this API returns, accessing the memory from any subsequent work launched on the GPU + * or querying its pointer attributes results in undefined behavior. + * + * \note During stream capture, this function results in the creation of a free node and + * must therefore be passed the address of a graph allocation. + * + * \param dptr - memory to free + * \param hStream - The stream establishing the stream ordering promise + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * \notefnerr + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cuMemFreeAsync, ::cudaMallocAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeAsync(void *devPtr, cudaStream_t hStream); + +/** + * \brief Tries to release memory back to the OS + * + * Releases memory back to the OS until the pool contains fewer than minBytesToKeep + * reserved bytes, or there is no more memory that the allocator can safely release. + * The allocator cannot release OS allocations that back outstanding asynchronous allocations. + * The OS allocations may happen at different granularity from the user allocations. + * + * \note: Allocations that have not been freed count as outstanding. + * \note: Allocations that have been asynchronously freed but whose completion has + * not been observed on the host (eg. by a synchronize) can count as outstanding. + * + * \param[in] pool - The memory pool to trim + * \param[in] minBytesToKeep - If the pool has less than minBytesToKeep reserved, + * the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have + * at least minBytesToKeep bytes reserved after the operation. + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_callback + * + * \sa ::cuMemPoolTrimTo, ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolTrimTo(cudaMemPool_t memPool, size_t minBytesToKeep); + +/** + * \brief Sets attributes of a memory pool + * + * Supported attributes are: + * - ::cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. 
When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::cudaMemPoolReuseFollowEventDependencies: (value type = int) + * Allow ::cudaMallocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::cudaMemPoolReuseAllowOpportunistic: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + * - ::cudaMemPoolReuseAllowInternalDependencies: (value type = int) + * Allow ::cudaMallocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cudaFreeAsync (default enabled). + * - ::cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of backing memory that was + * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + * - ::cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of used memory that was + * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + * + * \param[in] pool - The memory pool to modify + * \param[in] attr - The attribute to modify + * \param[in] value - Pointer to the value to assign + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_callback + * + * \sa ::cuMemPoolSetAttribute, ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate + + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolSetAttribute(cudaMemPool_t memPool, enum cudaMemPoolAttr attr, void *value ); + +/** + * \brief Gets attributes of a memory pool + * + * Supported attributes are: + * - ::cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::cudaMemPoolReuseFollowEventDependencies: (value type = int) + * Allow ::cudaMallocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::cudaMemPoolReuseAllowOpportunistic: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + * - ::cudaMemPoolReuseAllowInternalDependencies: (value type = int) + * Allow ::cudaMallocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cudaFreeAsync (default enabled). + * - ::cudaMemPoolAttrReservedMemCurrent: (value type = cuuint64_t) + * Amount of backing memory currently allocated for the mempool. 
+ * - ::cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) + * High watermark of backing memory allocated for the mempool since + * the last time it was reset. + * - ::cudaMemPoolAttrUsedMemCurrent: (value type = cuuint64_t) + * Amount of memory from the pool that is currently in use by the application. + * - ::cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) + * High watermark of the amount of memory from the pool that was in use by the + * application since the last time it was reset. + * + * \param[in] pool - The memory pool to get attributes of + * \param[in] attr - The attribute to get + * \param[in] value - Retrieved value + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_callback + * + * \sa ::cuMemPoolGetAttribute, ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate + + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolGetAttribute(cudaMemPool_t memPool, enum cudaMemPoolAttr attr, void *value ); + +/** + * \brief Controls visibility of pools between devices + * + * \param[in] pool - The pool being modified + * \param[in] map - Array of access descriptors. Each descriptor instructs the access to enable for a single gpu + * \param[in] count - Number of descriptors in the map array. + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa ::cuMemPoolSetAccess, ::cudaMemPoolGetAccess, ::cudaMallocAsync, cudaFreeAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolSetAccess(cudaMemPool_t memPool, const struct cudaMemAccessDesc *descList, size_t count); + +/** + * \brief Returns the accessibility of a pool from a device + * + * Returns the accessibility of the pool's memory from the specified location. + * + * \param[out] flags - the accessibility of the pool from the specified location + * \param[in] memPool - the pool being queried + * \param[in] location - the location accessing the pool + * + * \sa ::cuMemPoolGetAccess, ::cudaMemPoolSetAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolGetAccess(enum cudaMemAccessFlags *flags, cudaMemPool_t memPool, struct cudaMemLocation *location); + +/** + * \brief Creates a memory pool + * + * Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines + * the properties of the pool such as the backing device and IPC capabilities. + * + * By default, the pool's memory will be accessible from the device it is allocated on. + * + * \note Specifying cudaMemHandleTypeNone creates a memory pool that will not support IPC. + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * + * \sa ::cuMemPoolCreate, ::cudaDeviceSetMemPool, ::cudaMallocFromPoolAsync, ::cudaMemPoolExportToShareableHandle, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool + + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolCreate(cudaMemPool_t *memPool, const struct cudaMemPoolProps *poolProps); + +/** + * \brief Destroys the specified memory pool + * + * If any pointers obtained from this pool haven't been freed or + * the pool has free operations that haven't completed + * when ::cudaMemPoolDestroy is invoked, the function will return immediately and the + * resources associated with the pool will be released automatically + * once there are no more outstanding allocations. + * + * Destroying the current mempool of a device sets the default mempool of + * that device as the current mempool for that device. + * + * \note A device's default memory pool cannot be destroyed. 
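+ *
+ * A minimal lifecycle sketch for an explicit pool on device 0, assuming a stream
+ * \p stream created elsewhere and a hypothetical allocation size:
+ * \code
+ cudaMemPoolProps props = {};
+ props.allocType     = cudaMemAllocationTypePinned;
+ props.handleTypes   = cudaMemHandleTypeNone;        // pool without IPC support
+ props.location.type = cudaMemLocationTypeDevice;
+ props.location.id   = 0;                            // backing device
+
+ cudaMemPool_t pool;
+ cudaMemPoolCreate(&pool, &props);
+
+ void *p;
+ cudaMallocFromPoolAsync(&p, 1 << 20, pool, stream); // 1 MiB, stream-ordered
+ // ... launch work into stream that uses p ...
+ cudaFreeAsync(p, stream);
+ cudaStreamSynchronize(stream);                      // ensure the free has completed
+ cudaMemPoolDestroy(pool);
+ * \endcode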
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ *
+ * \sa ::cuMemPoolDestroy, ::cudaFreeAsync, ::cudaDeviceSetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemPoolDestroy(cudaMemPool_t memPool);
+
+/**
+ * \brief Allocates memory from a specified pool with stream ordered semantics.
+ *
+ * Inserts an allocation operation into \p stream.
+ * A pointer to the allocated memory is returned immediately in \p *ptr.
+ * The allocation must not be accessed until the allocation operation completes.
+ * The allocation comes from the specified memory pool.
+ *
+ * \note
+ * - The specified memory pool may be from a device different than that of the specified \p stream.
+ *
+ * - Basic stream ordering allows future work submitted into the same stream to use the allocation.
+ * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation
+ * operation completes before work submitted in a separate stream runs.
+ *
+ * \note During stream capture, this function results in the creation of an allocation node. In this case,
+ * the allocation is owned by the graph instead of the memory pool. The memory pool's properties
+ * are used to set the node's creation parameters.
+ *
+ * \param[out] ptr - Returned device pointer
+ * \param[in] bytesize - Number of bytes to allocate
+ * \param[in] memPool - The pool to allocate from
+ * \param[in] stream - The stream establishing the stream ordering semantic
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorNotSupported,
+ * ::cudaErrorOutOfMemory
+ *
+ * \sa ::cuMemAllocFromPoolAsync,
+ * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream) "cudaMallocAsync (C++ API)",
+ * ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaMemPoolCreate, ::cudaMemPoolSetAccess, ::cudaMemPoolSetAttribute
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMallocFromPoolAsync(void **ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream);
+
+/**
+ * \brief Exports a memory pool to the requested handle type.
+ *
+ * Given an IPC capable mempool, creates an OS handle to share the pool with another process.
+ * A recipient process can convert the shareable handle into a mempool with ::cudaMemPoolImportFromShareableHandle.
+ * Individual pointers can then be shared with the ::cudaMemPoolExportPointer and ::cudaMemPoolImportPointer APIs.
+ * The implementation of what the shareable handle is and how it can be transferred is defined by the requested
+ * handle type.
+ *
+ * \note To create an IPC capable mempool, create a mempool with a ::cudaMemAllocationHandleType other than cudaMemHandleTypeNone.
+ *
+ * \param[out] handle_out - pointer to the location in which to store the requested handle
+ * \param[in] pool - pool to export
+ * \param[in] handleType - the type of handle to create
+ * \param[in] flags - must be 0
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorOutOfMemory
+ *
+ * \sa ::cuMemPoolExportToShareableHandle, ::cudaMemPoolImportFromShareableHandle, ::cudaMemPoolExportPointer, ::cudaMemPoolImportPointer
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemPoolExportToShareableHandle(
+    void *shareableHandle,
+    cudaMemPool_t memPool,
+    enum cudaMemAllocationHandleType handleType,
+    unsigned int flags);
+
+/**
+ * \brief Imports a memory pool from a shareable handle.
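+ *
+ * For orientation only, a rough sketch of the exporting side of the IPC flow (POSIX
+ * file-descriptor handles are an assumption, and shipping the descriptor and export
+ * data to the other process is left out):
+ * \code
+    struct cudaMemPoolProps props = {};
+    props.allocType     = cudaMemAllocationTypePinned;
+    props.handleTypes   = cudaMemHandleTypePosixFileDescriptor;  // IPC capable
+    props.location.type = cudaMemLocationTypeDevice;
+    props.location.id   = 0;
+
+    cudaMemPool_t pool;
+    cudaMemPoolCreate(&pool, &props);
+
+    int fd;                                  // OS handle to send to the importing process
+    cudaMemPoolExportToShareableHandle(&fd, pool, cudaMemHandleTypePosixFileDescriptor, 0);
+
+    cudaStream_t stream = 0;                 // legacy default stream, for brevity
+    void *p;
+    cudaMallocFromPoolAsync(&p, 1 << 20, pool, stream);
+
+    struct cudaMemPoolPtrExportData shareData;
+    cudaMemPoolExportPointer(&shareData, p); // shareData can travel over any IPC mechanism
+ * \endcode
+ * The importing process would then call ::cudaMemPoolImportFromShareableHandle on the
+ * received descriptor and ::cudaMemPoolImportPointer on \c shareData.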
+ * + * Specific allocations can be imported from the imported pool with ::cudaMemPoolImportPointer. + * + * \note Imported memory pools do not support creating new allocations. + * As such imported memory pools may not be used in ::cudaDeviceSetMemPool + * or ::cudaMallocFromPoolAsync calls. + * + * \param[out] pool_out - Returned memory pool + * \param[in] handle - OS handle of the pool to open + * \param[in] handleType - The type of handle being imported + * \param[in] flags - must be 0 + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * + * \sa ::cuMemPoolImportFromShareableHandle, ::cudaMemPoolExportToShareableHandle, ::cudaMemPoolExportPointer, ::cudaMemPoolImportPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolImportFromShareableHandle( + cudaMemPool_t *memPool, + void *shareableHandle, + enum cudaMemAllocationHandleType handleType, + unsigned int flags); + +/** + * \brief Export data to share a memory pool allocation between processes. + * + * Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool. + * The recipient process can import the allocation with the ::cudaMemPoolImportPointer api. + * The data is not a handle and may be shared through any IPC mechanism. + * + * \param[out] shareData_out - Returned export data + * \param[in] ptr - pointer to memory being exported + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * + * \sa ::cuMemPoolExportPointer, ::cudaMemPoolExportToShareableHandle, ::cudaMemPoolImportFromShareableHandle, ::cudaMemPoolImportPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolExportPointer(struct cudaMemPoolPtrExportData *exportData, void *ptr); + +/** + * \brief Import a memory pool allocation from another process. + * + * Returns in \p ptr_out a pointer to the imported memory. + * The imported memory must not be accessed before the allocation operation completes + * in the exporting process. The imported memory must be freed from all importing processes before + * being freed in the exporting process. The pointer may be freed with cudaFree + * or cudaFreeAsync. If ::cudaFreeAsync is used, the free must be completed + * on the importing process before the free operation on the exporting process. + * + * \note The ::cudaFreeAsync api may be used in the exporting process before + * the ::cudaFreeAsync operation completes in its stream as long as the + * ::cudaFreeAsync in the exporting process specifies a stream with + * a stream dependency on the importing process's ::cudaFreeAsync. + * + * \param[out] ptr_out - pointer to imported memory + * \param[in] pool - pool from which to import + * \param[in] shareData - data specifying the memory to import + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolImportPointer, ::cudaMemPoolExportToShareableHandle, ::cudaMemPoolImportFromShareableHandle, ::cudaMemPoolExportPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolImportPointer(void **ptr, cudaMemPool_t memPool, struct cudaMemPoolPtrExportData *exportData); + +/** @} */ /* END CUDART_MEMORY_POOLS */ + +/** + * \defgroup CUDART_UNIFIED Unified Addressing + * + * ___MANBRIEF___ unified addressing functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the unified addressing functions of the CUDA + * runtime application programming interface. 
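+ *
+ * As a quick orientation, a minimal sketch of classifying a pointer with
+ * ::cudaPointerGetAttributes() (illustrative only; assumes a device with unified addressing):
+ * \code
+    void classify(const void *p)
+    {
+        struct cudaPointerAttributes attr;
+        if (cudaPointerGetAttributes(&attr, p) != cudaSuccess)
+            return;
+        switch (attr.type) {
+            case cudaMemoryTypeDevice:       /* device memory on attr.device */    break;
+            case cudaMemoryTypeHost:         /* registered / pinned host memory */ break;
+            case cudaMemoryTypeManaged:      /* managed (unified) memory */        break;
+            case cudaMemoryTypeUnregistered: /* ordinary host memory */            break;
+        }
+    }
+ * \endcode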
+ *
+ * @{
+ *
+ * \section CUDART_UNIFIED_overview Overview
+ *
+ * CUDA devices can share a unified address space with the host.
+ * For these devices there is no distinction between a device
+ * pointer and a host pointer -- the same pointer value may be
+ * used to access memory from the host program and from a kernel
+ * running on the device (with exceptions enumerated below).
+ *
+ * \section CUDART_UNIFIED_support Supported Platforms
+ *
+ * Whether or not a device supports unified addressing may be
+ * queried by calling ::cudaGetDeviceProperties() with the device
+ * property ::cudaDeviceProp::unifiedAddressing.
+ *
+ * Unified addressing is automatically enabled in 64-bit processes.
+ *
+ * \section CUDART_UNIFIED_lookup Looking Up Information from Pointer Values
+ *
+ * It is possible to look up information about the memory which backs a
+ * pointer value. For instance, one may want to know if a pointer points
+ * to host or device memory. As another example, in the case of device
+ * memory, one may want to know on which CUDA device the memory
+ * resides. These properties may be queried using the function
+ * ::cudaPointerGetAttributes().
+ *
+ * Since pointers are unique, it is not necessary to specify information
+ * about the pointers specified to ::cudaMemcpy() and other copy functions.
+ * The copy direction ::cudaMemcpyDefault may be used to specify that the
+ * CUDA runtime should infer the location of the pointer from its value.
+ *
+ * \section CUDART_UNIFIED_automaphost Automatic Mapping of Host Allocated Host Memory
+ *
+ * All host memory allocated through all devices using ::cudaMallocHost() and
+ * ::cudaHostAlloc() is always directly accessible from all devices that
+ * support unified addressing. This is the case regardless of whether or
+ * not the flags ::cudaHostAllocPortable and ::cudaHostAllocMapped are
+ * specified.
+ *
+ * The pointer value through which allocated host memory may be accessed
+ * in kernels on all devices that support unified addressing is the same
+ * as the pointer value through which that memory is accessed on the host.
+ * It is not necessary to call ::cudaHostGetDevicePointer() to get the device
+ * pointer for these allocations.
+ *
+ * Note that this is not the case for memory allocated using the flag
+ * ::cudaHostAllocWriteCombined, as discussed below.
+ *
+ * \section CUDART_UNIFIED_autopeerregister Direct Access of Peer Memory
+ *
+ * Upon enabling direct access from a device that supports unified addressing
+ * to another peer device that supports unified addressing using
+ * ::cudaDeviceEnablePeerAccess(), all memory allocated in the peer device using
+ * ::cudaMalloc() and ::cudaMallocPitch() will immediately be accessible
+ * by the current device. The device pointer value through
+ * which any peer's memory may be accessed in the current device
+ * is the same pointer value through which that memory may be
+ * accessed from the peer device.
+ *
+ * \section CUDART_UNIFIED_exceptions Exceptions, Disjoint Addressing
+ *
+ * Not all memory may be accessed on devices through the same pointer
+ * value through which it is accessed on the host. These exceptions
+ * are host memory registered using ::cudaHostRegister() and host memory
+ * allocated using the flag ::cudaHostAllocWriteCombined. For these
+ * exceptions, there exists a distinct host and device address for the
+ * memory.
The device address is guaranteed to not overlap any valid host + * pointer range and is guaranteed to have the same value across all devices + * that support unified addressing. + * + * This device address may be queried using ::cudaHostGetDevicePointer() + * when a device using unified addressing is current. Either the host + * or the unified device pointer value may be used to refer to this memory + * in ::cudaMemcpy() and similar functions using the ::cudaMemcpyDefault + * memory direction. + * + */ + +/** + * \brief Returns attributes about a specified pointer + * + * Returns in \p *attributes the attributes of the pointer \p ptr. + * If pointer was not allocated in, mapped by or registered with context + * supporting unified addressing ::cudaErrorInvalidValue is returned. + * + * \note In CUDA 11.0 forward passing host pointer will return ::cudaMemoryTypeUnregistered + * in ::cudaPointerAttributes::type and call will return ::cudaSuccess. + * + * The ::cudaPointerAttributes structure is defined as: + * \code + struct cudaPointerAttributes { + enum cudaMemoryType type; + int device; + void *devicePointer; + void *hostPointer; + } + \endcode + * In this structure, the individual fields mean + * + * - \ref ::cudaPointerAttributes::type identifies type of memory. It can be + * ::cudaMemoryTypeUnregistered for unregistered host memory, + * ::cudaMemoryTypeHost for registered host memory, ::cudaMemoryTypeDevice for device + * memory or ::cudaMemoryTypeManaged for managed memory. + * + * - \ref ::cudaPointerAttributes::device "device" is the device against which + * \p ptr was allocated. If \p ptr has memory type ::cudaMemoryTypeDevice + * then this identifies the device on which the memory referred to by \p ptr + * physically resides. If \p ptr has memory type ::cudaMemoryTypeHost then this + * identifies the device which was current when the allocation was made + * (and if that device is deinitialized then this allocation will vanish + * with that device's state). + * + * - \ref ::cudaPointerAttributes::devicePointer "devicePointer" is + * the device pointer alias through which the memory referred to by \p ptr + * may be accessed on the current device. + * If the memory referred to by \p ptr cannot be accessed directly by the + * current device then this is NULL. + * + * - \ref ::cudaPointerAttributes::hostPointer "hostPointer" is + * the host pointer alias through which the memory referred to by \p ptr + * may be accessed on the host. + * If the memory referred to by \p ptr cannot be accessed directly by the + * host then this is NULL. + * + * \param attributes - Attributes for the specified pointer + * \param ptr - Pointer to get attributes for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, + * ::cudaChooseDevice, + * ::cudaInitDevice, + * ::cuPointerGetAttributes + */ +extern __host__ cudaError_t CUDARTAPI cudaPointerGetAttributes(struct cudaPointerAttributes *attributes, const void *ptr); + +/** @} */ /* END CUDART_UNIFIED */ + +/** + * \defgroup CUDART_PEER Peer Device Memory Access + * + * ___MANBRIEF___ peer device memory access functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the peer device memory access functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Queries if a device may directly access a peer device's memory. 
+ * + * Returns in \p *canAccessPeer a value of 1 if device \p device is capable of + * directly accessing memory from \p peerDevice and 0 otherwise. If direct + * access of \p peerDevice from \p device is possible, then access may be + * enabled by calling ::cudaDeviceEnablePeerAccess(). + * + * \param canAccessPeer - Returned access capability + * \param device - Device from which allocations on \p peerDevice are to + * be directly accessed. + * \param peerDevice - Device on which the allocations to be directly accessed + * by \p device reside. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceEnablePeerAccess, + * ::cudaDeviceDisablePeerAccess, + * ::cuDeviceCanAccessPeer + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceCanAccessPeer(int *canAccessPeer, int device, int peerDevice); + +/** + * \brief Enables direct access to memory allocations on a peer device. + * + * On success, all allocations from \p peerDevice will immediately be accessible by + * the current device. They will remain accessible until access is explicitly + * disabled using ::cudaDeviceDisablePeerAccess() or either device is reset using + * ::cudaDeviceReset(). + * + * Note that access granted by this call is unidirectional and that in order to access + * memory on the current device from \p peerDevice, a separate symmetric call + * to ::cudaDeviceEnablePeerAccess() is required. + * + * Note that there are both device-wide and system-wide limitations per system + * configuration, as noted in the CUDA Programming Guide under the section + * "Peer-to-Peer Memory Access". + * + * Returns ::cudaErrorInvalidDevice if ::cudaDeviceCanAccessPeer() indicates + * that the current device cannot directly access memory from \p peerDevice. + * + * Returns ::cudaErrorPeerAccessAlreadyEnabled if direct access of + * \p peerDevice from the current device has already been enabled. + * + * Returns ::cudaErrorInvalidValue if \p flags is not 0. + * + * \param peerDevice - Peer device to enable direct access to from the current device + * \param flags - Reserved for future use and must be set to 0 + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorPeerAccessAlreadyEnabled, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceCanAccessPeer, + * ::cudaDeviceDisablePeerAccess, + * ::cuCtxEnablePeerAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags); + +/** + * \brief Disables direct access to memory allocations on a peer device. + * + * Returns ::cudaErrorPeerAccessNotEnabled if direct access to memory on + * \p peerDevice has not yet been enabled from the current device. 
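+ *
+ * For illustration, a minimal sketch of the usual query / enable / disable sequence
+ * (devices 0 and 1 are example indices; error checking omitted):
+ * \code
+    int canAccess = 0;
+    cudaDeviceCanAccessPeer(&canAccess, 0, 1);  // can device 0 reach device 1's memory?
+    if (canAccess) {
+        cudaSetDevice(0);
+        cudaDeviceEnablePeerAccess(1, 0);       // flags must be 0
+        // ... kernels running on device 0 may now dereference pointers
+        //     returned by cudaMalloc() on device 1 ...
+        cudaDeviceDisablePeerAccess(1);
+    }
+ * \endcode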
+ * + * \param peerDevice - Peer device to disable direct access to + * + * \return + * ::cudaSuccess, + * ::cudaErrorPeerAccessNotEnabled, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceCanAccessPeer, + * ::cudaDeviceEnablePeerAccess, + * ::cuCtxDisablePeerAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceDisablePeerAccess(int peerDevice); + +/** @} */ /* END CUDART_PEER */ + +/** \defgroup CUDART_OPENGL OpenGL Interoperability */ + +/** \defgroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_D3D9 Direct3D 9 Interoperability */ + +/** \defgroup CUDART_D3D9_DEPRECATED Direct3D 9 Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_D3D10 Direct3D 10 Interoperability */ + +/** \defgroup CUDART_D3D10_DEPRECATED Direct3D 10 Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_D3D11 Direct3D 11 Interoperability */ + +/** \defgroup CUDART_D3D11_DEPRECATED Direct3D 11 Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_VDPAU VDPAU Interoperability */ + +/** \defgroup CUDART_EGL EGL Interoperability */ + +/** + * \defgroup CUDART_INTEROP Graphics Interoperability + * + * ___MANBRIEF___ graphics interoperability functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graphics interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Unregisters a graphics resource for access by CUDA + * + * Unregisters the graphics resource \p resource so it is not accessible by + * CUDA unless registered again. + * + * If \p resource is invalid then ::cudaErrorInvalidResourceHandle is + * returned. + * + * \param resource - Resource to unregister + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphicsD3D9RegisterResource, + * ::cudaGraphicsD3D10RegisterResource, + * ::cudaGraphicsD3D11RegisterResource, + * ::cudaGraphicsGLRegisterBuffer, + * ::cudaGraphicsGLRegisterImage, + * ::cuGraphicsUnregisterResource + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsUnregisterResource(cudaGraphicsResource_t resource); + +/** + * \brief Set usage flags for mapping a graphics resource + * + * Set \p flags for mapping the graphics resource \p resource. + * + * Changes to \p flags will take effect the next time \p resource is mapped. + * The \p flags argument may be any of the following: + * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how \p resource will + * be used. It is therefore assumed that CUDA may read from or write to \p resource. + * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to \p resource. + * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies CUDA will not read from \p resource and will + * write over the entire contents of \p resource, so none of the data + * previously stored in \p resource will be preserved. + * + * If \p resource is presently mapped for access by CUDA then ::cudaErrorUnknown is returned. + * If \p flags is not one of the above values then ::cudaErrorInvalidValue is returned. 
+ * + * \param resource - Registered resource to set flags for + * \param flags - Parameters for resource mapping + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsMapResources, + * ::cuGraphicsResourceSetMapFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceSetMapFlags(cudaGraphicsResource_t resource, unsigned int flags); + +/** + * \brief Map graphics resources for access by CUDA + * + * Maps the \p count graphics resources in \p resources for access by CUDA. + * + * The resources in \p resources may be accessed by CUDA until they + * are unmapped. The graphics API from which \p resources were registered + * should not access any resources while they are mapped by CUDA. If an + * application does so, the results are undefined. + * + * This function provides the synchronization guarantee that any graphics calls + * issued before ::cudaGraphicsMapResources() will complete before any subsequent CUDA + * work issued in \p stream begins. + * + * If \p resources contains any duplicate entries then ::cudaErrorInvalidResourceHandle + * is returned. If any of \p resources are presently mapped for access by + * CUDA then ::cudaErrorUnknown is returned. + * + * \param count - Number of resources to map + * \param resources - Resources to map for CUDA + * \param stream - Stream for synchronization + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsResourceGetMappedPointer, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsUnmapResources, + * ::cuGraphicsMapResources + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsMapResources(int count, cudaGraphicsResource_t *resources, cudaStream_t stream __dv(0)); + +/** + * \brief Unmap graphics resources. + * + * Unmaps the \p count graphics resources in \p resources. + * + * Once unmapped, the resources in \p resources may not be accessed by CUDA + * until they are mapped again. + * + * This function provides the synchronization guarantee that any CUDA work issued + * in \p stream before ::cudaGraphicsUnmapResources() will complete before any + * subsequently issued graphics work begins. + * + * If \p resources contains any duplicate entries then ::cudaErrorInvalidResourceHandle + * is returned. If any of \p resources are not presently mapped for access by + * CUDA then ::cudaErrorUnknown is returned. + * + * \param count - Number of resources to unmap + * \param resources - Resources to unmap + * \param stream - Stream for synchronization + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsMapResources, + * ::cuGraphicsUnmapResources + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsUnmapResources(int count, cudaGraphicsResource_t *resources, cudaStream_t stream __dv(0)); + +/** + * \brief Get an device pointer through which to access a mapped graphics resource. + * + * Returns in \p *devPtr a pointer through which the mapped graphics resource + * \p resource may be accessed. + * Returns in \p *size the size of the memory in bytes which may be accessed from that pointer. + * The value set in \p devPtr may change every time that \p resource is mapped. 
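+ *
+ * For illustration, a rough sketch of the map / access / unmap pattern. The resource
+ * \c res is assumed to have been registered beforehand with a graphics-API-specific call
+ * (for example ::cudaGraphicsGLRegisterBuffer), and \c stream is an existing stream:
+ * \code
+    cudaGraphicsResourceSetMapFlags(res, cudaGraphicsMapFlagsWriteDiscard);
+    cudaGraphicsMapResources(1, &res, stream);
+
+    void  *devPtr = NULL;
+    size_t size   = 0;
+    cudaGraphicsResourceGetMappedPointer(&devPtr, &size, res);
+    // ... launch kernels in 'stream' that write to devPtr ...
+
+    cudaGraphicsUnmapResources(1, &res, stream); // the graphics API may use the buffer again
+ * \endcode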
+ * + * If \p resource is not a buffer then it cannot be accessed via a pointer and + * ::cudaErrorUnknown is returned. + * If \p resource is not mapped then ::cudaErrorUnknown is returned. + * * + * \param devPtr - Returned pointer through which \p resource may be accessed + * \param size - Returned size of the buffer accessible starting at \p *devPtr + * \param resource - Mapped resource to access + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsMapResources, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cuGraphicsResourceGetMappedPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedPointer(void **devPtr, size_t *size, cudaGraphicsResource_t resource); + +/** + * \brief Get an array through which to access a subresource of a mapped graphics resource. + * + * Returns in \p *array an array through which the subresource of the mapped + * graphics resource \p resource which corresponds to array index \p arrayIndex + * and mipmap level \p mipLevel may be accessed. The value set in \p array may + * change every time that \p resource is mapped. + * + * If \p resource is not a texture then it cannot be accessed via an array and + * ::cudaErrorUnknown is returned. + * If \p arrayIndex is not a valid array index for \p resource then + * ::cudaErrorInvalidValue is returned. + * If \p mipLevel is not a valid mipmap level for \p resource then + * ::cudaErrorInvalidValue is returned. + * If \p resource is not mapped then ::cudaErrorUnknown is returned. + * + * \param array - Returned array through which a subresource of \p resource may be accessed + * \param resource - Mapped resource to access + * \param arrayIndex - Array index for array textures or cubemap face + * index as defined by ::cudaGraphicsCubeFace for + * cubemap textures for the subresource to access + * \param mipLevel - Mipmap level for the subresource to access + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsSubResourceGetMappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsSubResourceGetMappedArray(cudaArray_t *array, cudaGraphicsResource_t resource, unsigned int arrayIndex, unsigned int mipLevel); + +/** + * \brief Get a mipmapped array through which to access a mapped graphics resource. + * + * Returns in \p *mipmappedArray a mipmapped array through which the mapped + * graphics resource \p resource may be accessed. The value set in \p mipmappedArray may + * change every time that \p resource is mapped. + * + * If \p resource is not a texture then it cannot be accessed via an array and + * ::cudaErrorUnknown is returned. + * If \p resource is not mapped then ::cudaErrorUnknown is returned. 
+ * + * \param mipmappedArray - Returned mipmapped array through which \p resource may be accessed + * \param resource - Mapped resource to access + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsResourceGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedMipmappedArray(cudaMipmappedArray_t *mipmappedArray, cudaGraphicsResource_t resource); + +/** @} */ /* END CUDART_INTEROP */ + +/** + * \defgroup CUDART_TEXTURE_OBJECT Texture Object Management + * + * ___MANBRIEF___ texture object management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the low level texture object management functions + * of the CUDA runtime application programming interface. The texture + * object API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Get the channel descriptor of an array + * + * Returns in \p *desc the channel descriptor of the CUDA array \p array. + * + * \param desc - Channel format + * \param array - Memory array on device + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) "cudaCreateChannelDesc (C API)", + * ::cudaCreateTextureObject, ::cudaCreateSurfaceObject + */ +extern __host__ cudaError_t CUDARTAPI cudaGetChannelDesc(struct cudaChannelFormatDesc *desc, cudaArray_const_t array); + +/** + * \brief Returns a channel descriptor using the specified format + * + * Returns a channel descriptor with format \p f and number of bits of each + * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is + * defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + * \endcode + * + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. + * + * \param x - X component + * \param y - Y component + * \param z - Z component + * \param w - W component + * \param f - Channel format + * + * \return + * Channel descriptor with format \p f + * + * \sa \ref ::cudaCreateChannelDesc(void) "cudaCreateChannelDesc (C++ API)", + * ::cudaGetChannelDesc, ::cudaCreateTextureObject, ::cudaCreateSurfaceObject + */ +extern __host__ struct cudaChannelFormatDesc CUDARTAPI cudaCreateChannelDesc(int x, int y, int z, int w, enum cudaChannelFormatKind f); + +/** + * \brief Creates a texture object + * + * Creates a texture object and returns it in \p pTexObject. \p pResDesc describes + * the data to texture from. \p pTexDesc describes how the data should be sampled. + * \p pResViewDesc is an optional argument that specifies an alternate format for + * the data described by \p pResDesc, and also describes the subresource region + * to restrict access to when texturing. \p pResViewDesc can only be specified if + * the type of resource is a CUDA array or a CUDA mipmapped array. + * + * Texture objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a texture object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. 
+ * + * The ::cudaResourceDesc structure is defined as: + * \code + struct cudaResourceDesc { + enum cudaResourceType resType; + + union { + struct { + cudaArray_t array; + } array; + struct { + cudaMipmappedArray_t mipmap; + } mipmap; + struct { + void *devPtr; + struct cudaChannelFormatDesc desc; + size_t sizeInBytes; + } linear; + struct { + void *devPtr; + struct cudaChannelFormatDesc desc; + size_t width; + size_t height; + size_t pitchInBytes; + } pitch2D; + } res; + }; + * \endcode + * where: + * - ::cudaResourceDesc::resType specifies the type of resource to texture from. + * CUresourceType is defined as: + * \code + enum cudaResourceType { + cudaResourceTypeArray = 0x00, + cudaResourceTypeMipmappedArray = 0x01, + cudaResourceTypeLinear = 0x02, + cudaResourceTypePitch2D = 0x03 + }; + * \endcode + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypeArray, ::cudaResourceDesc::res::array::array + * must be set to a valid CUDA array handle. + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypeMipmappedArray, ::cudaResourceDesc::res::mipmap::mipmap + * must be set to a valid CUDA mipmapped array handle and ::cudaTextureDesc::normalizedCoords must be set to true. + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypeLinear, ::cudaResourceDesc::res::linear::devPtr + * must be set to a valid device pointer, that is aligned to ::cudaDeviceProp::textureAlignment. + * ::cudaResourceDesc::res::linear::desc describes the format and the number of components per array element. ::cudaResourceDesc::res::linear::sizeInBytes + * specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed + * ::cudaDeviceProp::maxTexture1DLinear. The number of elements is computed as (sizeInBytes / sizeof(desc)). + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypePitch2D, ::cudaResourceDesc::res::pitch2D::devPtr + * must be set to a valid device pointer, that is aligned to ::cudaDeviceProp::textureAlignment. + * ::cudaResourceDesc::res::pitch2D::desc describes the format and the number of components per array element. ::cudaResourceDesc::res::pitch2D::width + * and ::cudaResourceDesc::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed + * ::cudaDeviceProp::maxTexture2DLinear[0] and ::cudaDeviceProp::maxTexture2DLinear[1] respectively. + * ::cudaResourceDesc::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to + * ::cudaDeviceProp::texturePitchAlignment. Pitch cannot exceed ::cudaDeviceProp::maxTexture2DLinear[2]. + * + * + * The ::cudaTextureDesc struct is defined as + * \code + struct cudaTextureDesc { + enum cudaTextureAddressMode addressMode[3]; + enum cudaTextureFilterMode filterMode; + enum cudaTextureReadMode readMode; + int sRGB; + float borderColor[4]; + int normalizedCoords; + unsigned int maxAnisotropy; + enum cudaTextureFilterMode mipmapFilterMode; + float mipmapLevelBias; + float minMipmapLevelClamp; + float maxMipmapLevelClamp; + int disableTrilinearOptimization; + int seamlessCubemap; + }; + * \endcode + * where + * - ::cudaTextureDesc::addressMode specifies the addressing mode for each dimension of the texture data. 
::cudaTextureAddressMode is defined as:
+ * \code
+    enum cudaTextureAddressMode {
+        cudaAddressModeWrap   = 0,
+        cudaAddressModeClamp  = 1,
+        cudaAddressModeMirror = 2,
+        cudaAddressModeBorder = 3
+    };
+ * \endcode
+ * This is ignored if ::cudaResourceDesc::resType is ::cudaResourceTypeLinear. Also, if ::cudaTextureDesc::normalizedCoords
+ * is set to zero, ::cudaAddressModeWrap and ::cudaAddressModeMirror won't be supported and will be switched to ::cudaAddressModeClamp.
+ *
+ * - ::cudaTextureDesc::filterMode specifies the filtering mode to be used when fetching from the texture. ::cudaTextureFilterMode is defined as:
+ * \code
+    enum cudaTextureFilterMode {
+        cudaFilterModePoint  = 0,
+        cudaFilterModeLinear = 1
+    };
+ * \endcode
+ * This is ignored if ::cudaResourceDesc::resType is ::cudaResourceTypeLinear.
+ *
+ * - ::cudaTextureDesc::readMode specifies whether integer data should be converted to floating point or not. ::cudaTextureReadMode is defined as:
+ * \code
+    enum cudaTextureReadMode {
+        cudaReadModeElementType     = 0,
+        cudaReadModeNormalizedFloat = 1
+    };
+ * \endcode
+ * Note that this applies only to 8-bit and 16-bit integer formats. 32-bit integer formats would not be promoted, regardless of
+ * whether or not ::cudaReadModeNormalizedFloat is specified for ::cudaTextureDesc::readMode.
+ *
+ * - ::cudaTextureDesc::sRGB specifies whether sRGB to linear conversion should be performed during texture fetch.
+ *
+ * - ::cudaTextureDesc::borderColor specifies the float values of color, where:
+ * ::cudaTextureDesc::borderColor[0] contains the value of 'R',
+ * ::cudaTextureDesc::borderColor[1] contains the value of 'G',
+ * ::cudaTextureDesc::borderColor[2] contains the value of 'B',
+ * ::cudaTextureDesc::borderColor[3] contains the value of 'A'.
+ * Note that applications using integer border color values will need to reinterpret these values as float.
+ * The values are set only when the addressing mode specified by ::cudaTextureDesc::addressMode is ::cudaAddressModeBorder.
+ *
+ * - ::cudaTextureDesc::normalizedCoords specifies whether the texture coordinates will be normalized or not.
+ *
+ * - ::cudaTextureDesc::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be
+ * clamped to the range [1,16].
+ *
+ * - ::cudaTextureDesc::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels.
+ *
+ * - ::cudaTextureDesc::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level.
+ *
+ * - ::cudaTextureDesc::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to.
+ *
+ * - ::cudaTextureDesc::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to.
+ *
+ * - ::cudaTextureDesc::disableTrilinearOptimization specifies whether the trilinear filtering optimizations will be disabled.
+ *
+ * - ::cudaTextureDesc::seamlessCubemap specifies whether seamless cube map filtering is enabled. This flag can only be specified if the
+ * underlying resource is a CUDA array or a CUDA mipmapped array that was created with the flag ::cudaArrayCubemap.
+ * When seamless cube map filtering is enabled, texture address modes specified by ::cudaTextureDesc::addressMode are ignored.
+ * Instead, if the ::cudaTextureDesc::filterMode is set to ::cudaFilterModePoint, the address mode ::cudaAddressModeClamp will be applied for all dimensions.
+ * If the ::cudaTextureDesc::filterMode is set to ::cudaFilterModeLinear seamless cube map filtering will be performed when sampling along the cube face borders. + * + * The ::cudaResourceViewDesc struct is defined as + * \code + struct cudaResourceViewDesc { + enum cudaResourceViewFormat format; + size_t width; + size_t height; + size_t depth; + unsigned int firstMipmapLevel; + unsigned int lastMipmapLevel; + unsigned int firstLayer; + unsigned int lastLayer; + }; + * \endcode + * where: + * - ::cudaResourceViewDesc::format specifies how the data contained in the CUDA array or CUDA mipmapped array should + * be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block + * compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a 32-bit unsigned integer format + * with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have + * a 32-bit unsigned int with 2 channels. The other BC formats require the underlying resource to have the same 32-bit unsigned int + * format but with 4 channels. + * + * - ::cudaResourceViewDesc::width specifies the new width of the texture data. If the resource view format is a block + * compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, + * this value has to be equal to that of the original resource. + * + * - ::cudaResourceViewDesc::height specifies the new height of the texture data. If the resource view format is a block + * compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, + * this value has to be equal to that of the original resource. + * + * - ::cudaResourceViewDesc::depth specifies the new depth of the texture data. This value has to be equal to that of the + * original resource. + * + * - ::cudaResourceViewDesc::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. + * For non-mipmapped resources, this value has to be zero.::cudaTextureDesc::minMipmapLevelClamp and ::cudaTextureDesc::maxMipmapLevelClamp + * will be relative to this value. For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, + * then the actual minimum mipmap level clamp will be 3.2. + * + * - ::cudaResourceViewDesc::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value + * has to be zero. + * + * - ::cudaResourceViewDesc::firstLayer specifies the first layer index for layered textures. This will be the new layer zero. + * For non-layered resources, this value has to be zero. + * + * - ::cudaResourceViewDesc::lastLayer specifies the last layer index for layered textures. For non-layered resources, + * this value has to be zero. 
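+ *
+ * Putting the pieces together, a minimal sketch that creates a texture object over a
+ * linear device buffer of floats (\c d_buf and \c N are assumptions; error checking omitted):
+ * \code
+    struct cudaResourceDesc resDesc;
+    memset(&resDesc, 0, sizeof(resDesc));
+    resDesc.resType                = cudaResourceTypeLinear;
+    resDesc.res.linear.devPtr      = d_buf;                   // device buffer of N floats
+    resDesc.res.linear.desc        = cudaCreateChannelDesc<float>();
+    resDesc.res.linear.sizeInBytes = N * sizeof(float);
+
+    struct cudaTextureDesc texDesc;
+    memset(&texDesc, 0, sizeof(texDesc));
+    texDesc.readMode = cudaReadModeElementType;
+
+    cudaTextureObject_t tex = 0;
+    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);  // no resource view for linear memory
+    // ... pass 'tex' to kernels that read it via tex1Dfetch<float>(tex, i) ...
+    cudaDestroyTextureObject(tex);
+ * \endcode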
+ * + * + * \param pTexObject - Texture object to create + * \param pResDesc - Resource descriptor + * \param pTexDesc - Texture descriptor + * \param pResViewDesc - Resource view descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDestroyTextureObject, + * ::cuTexObjectCreate + */ + +extern __host__ cudaError_t CUDARTAPI cudaCreateTextureObject(cudaTextureObject_t *pTexObject, const struct cudaResourceDesc *pResDesc, const struct cudaTextureDesc *pTexDesc, const struct cudaResourceViewDesc *pResViewDesc); + +/** + * \brief Destroys a texture object + * + * Destroys the texture object specified by \p texObject. + * + * \param texObject - Texture object to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroyTextureObject(cudaTextureObject_t texObject); + +/** + * \brief Returns a texture object's resource descriptor + * + * Returns the resource descriptor for the texture object specified by \p texObject. + * + * \param pResDesc - Resource descriptor + * \param texObject - Texture object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectGetResourceDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetTextureObjectResourceDesc(struct cudaResourceDesc *pResDesc, cudaTextureObject_t texObject); + +/** + * \brief Returns a texture object's texture descriptor + * + * Returns the texture descriptor for the texture object specified by \p texObject. + * + * \param pTexDesc - Texture descriptor + * \param texObject - Texture object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectGetTextureDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetTextureObjectTextureDesc(struct cudaTextureDesc *pTexDesc, cudaTextureObject_t texObject); + +/** + * \brief Returns a texture object's resource view descriptor + * + * Returns the resource view descriptor for the texture object specified by \p texObject. + * If no resource view was specified, ::cudaErrorInvalidValue is returned. + * + * \param pResViewDesc - Resource view descriptor + * \param texObject - Texture object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectGetResourceViewDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetTextureObjectResourceViewDesc(struct cudaResourceViewDesc *pResViewDesc, cudaTextureObject_t texObject); + +/** @} */ /* END CUDART_TEXTURE_OBJECT */ + +/** + * \defgroup CUDART_SURFACE_OBJECT Surface Object Management + * + * ___MANBRIEF___ surface object management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the low level texture object management functions + * of the CUDA runtime application programming interface. The surface object + * API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Creates a surface object + * + * Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes + * the data to perform surface load/stores on. 
::cudaResourceDesc::resType must be + * ::cudaResourceTypeArray and ::cudaResourceDesc::res::array::array + * must be set to a valid CUDA array handle. + * + * Surface objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a surface object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * \param pSurfObject - Surface object to create + * \param pResDesc - Resource descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidChannelDescriptor, + * ::cudaErrorInvalidResourceHandle + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDestroySurfaceObject, + * ::cuSurfObjectCreate + */ + +extern __host__ cudaError_t CUDARTAPI cudaCreateSurfaceObject(cudaSurfaceObject_t *pSurfObject, const struct cudaResourceDesc *pResDesc); + +/** + * \brief Destroys a surface object + * + * Destroys the surface object specified by \p surfObject. + * + * \param surfObject - Surface object to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaCreateSurfaceObject, + * ::cuSurfObjectDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroySurfaceObject(cudaSurfaceObject_t surfObject); + +/** + * \brief Returns a surface object's resource descriptor + * Returns the resource descriptor for the surface object specified by \p surfObject. + * + * \param pResDesc - Resource descriptor + * \param surfObject - Surface object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateSurfaceObject, + * ::cuSurfObjectGetResourceDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetSurfaceObjectResourceDesc(struct cudaResourceDesc *pResDesc, cudaSurfaceObject_t surfObject); + +/** @} */ /* END CUDART_SURFACE_OBJECT */ + +/** + * \defgroup CUDART__VERSION Version Management + * + * @{ + */ + +/** + * \brief Returns the latest version of CUDA supported by the driver + * + * Returns in \p *driverVersion the latest version of CUDA supported by + * the driver. The version is returned as (1000 × major + 10 × minor). + * For example, CUDA 9.2 would be represented by 9020. If no driver is installed, + * then 0 is returned as the driver version. + * + * This function automatically returns ::cudaErrorInvalidValue + * if \p driverVersion is NULL. + * + * \param driverVersion - Returns the CUDA driver version. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaRuntimeGetVersion, + * ::cuDriverGetVersion + */ +extern __host__ cudaError_t CUDARTAPI cudaDriverGetVersion(int *driverVersion); + +/** + * \brief Returns the CUDA Runtime version + * + * Returns in \p *runtimeVersion the version number of the current CUDA + * Runtime instance. The version is returned as + * (1000 × major + 10 × minor). For example, + * CUDA 9.2 would be represented by 9020. + * + * As of CUDA 12.0, this function no longer initializes CUDA. The purpose + * of this API is solely to return a compile-time constant stating the + * CUDA Toolkit version in the above format. + * + * This function automatically returns ::cudaErrorInvalidValue if + * the \p runtimeVersion argument is NULL. + * + * \param runtimeVersion - Returns the CUDA Runtime version. 
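+ *
+ * For example, the encoded value can be split back into its parts like this
+ * (a small sketch; variable names are illustrative):
+ * \code
+    int rt = 0, drv = 0;
+    cudaRuntimeGetVersion(&rt);
+    cudaDriverGetVersion(&drv);
+    int rtMajor  = rt / 1000,  rtMinor  = (rt  % 1000) / 10;  // e.g. 12040 -> 12.4
+    int drvMajor = drv / 1000, drvMinor = (drv % 1000) / 10;  // drv == 0 means no driver installed
+ * \endcode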
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDriverGetVersion, + * ::cuDriverGetVersion + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion); + +/** @} */ /* END CUDART__VERSION */ + +/** + * \defgroup CUDART_GRAPH Graph Management + * + * ___MANBRIEF___ graph management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graph management functions of CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Creates a graph + * + * Creates an empty graph, which is returned via \p pGraph. + * + * \param pGraph - Returns newly created graph + * \param flags - Graph creation flags, must be 0 + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphInstantiate, + * ::cudaGraphDestroy, + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphCreate(cudaGraph_t *pGraph, unsigned int flags); + +/** + * \brief Creates a kernel execution node and adds it to a graph + * + * Creates a new kernel execution node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and arguments specified in \p pNodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * The cudaKernelNodeParams structure is defined as: + * + * \code + * struct cudaKernelNodeParams + * { + * void* func; + * dim3 gridDim; + * dim3 blockDim; + * unsigned int sharedMemBytes; + * void **kernelParams; + * void **extra; + * }; + * \endcode + * + * When the graph is launched, the node will invoke kernel \p func on a (\p gridDim.x x + * \p gridDim.y x \p gridDim.z) grid of blocks. Each block contains + * (\p blockDim.x x \p blockDim.y x \p blockDim.z) threads. + * + * \p sharedMem sets the amount of dynamic shared memory that will be + * available to each thread block. + * + * Kernel parameters to \p func can be specified in one of two ways: + * + * 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N + * parameters, then \p kernelParams needs to be an array of N pointers. Each pointer, + * from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual + * parameter will be copied. The number of kernel parameters and their offsets and sizes do not need + * to be specified as that information is retrieved directly from the kernel's image. + * + * 2) Kernel parameters can also be packaged by the application into a single buffer that is passed in + * via \p extra. This places the burden on the application of knowing each kernel + * parameter's size and alignment/padding within the buffer. The \p extra parameter exists + * to allow this function to take additional less commonly used arguments. \p extra specifies + * a list of names of extra settings and their corresponding values. 
Each extra setting name is + * immediately followed by the corresponding value. The list must be terminated with either NULL or + * CU_LAUNCH_PARAM_END. + * + * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + * array; + * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + * value in \p extra will be a pointer to a buffer + * containing all the kernel parameters for launching kernel + * \p func; + * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + * value in \p extra will be a pointer to a size_t + * containing the size of the buffer specified with + * ::CU_LAUNCH_PARAM_BUFFER_POINTER; + * + * The error ::cudaErrorInvalidValue will be returned if kernel parameters are specified with both + * \p kernelParams and \p extra (i.e. both \p kernelParams and + * \p extra are non-NULL). + * + * The \p kernelParams or \p extra array, as well as the argument values it points to, + * are copied during this call. + * + * \note Kernels launched using graphs must not use texture and surface references. Reading or + * writing through any texture or surface reference is undefined behavior. + * This restriction does not apply to texture and surface objects. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pNodeParams - Parameters for the GPU execution node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDeviceFunction + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphKernelNodeGetParams, + * ::cudaGraphKernelNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddKernelNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Returns a kernel node's parameters + * + * Returns the parameters of kernel node \p node in \p pNodeParams. + * The \p kernelParams or \p extra array returned in \p pNodeParams, + * as well as the argument values it points to, are owned by the node. + * This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cudaGraphKernelNodeSetParams to update the + * parameters of this node. + * + * The params will contain either \p kernelParams or \p extra, + * according to which of these was most recently set on the node. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDeviceFunction + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphAddKernelNode, + * ::cudaGraphKernelNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeGetParams(cudaGraphNode_t node, struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Sets a kernel node's parameters + * + * Sets the parameters of kernel node \p node to \p pNodeParams. 
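+ *
+ * For illustration, a small sketch that points an existing kernel node at new parameter
+ * values (\c myKernel, \c node and \c d_data are assumptions):
+ * \code
+    // __global__ void myKernel(float *data, int n);
+    int   n = 1 << 20;
+    void *args[] = { &d_data, &n };
+
+    struct cudaKernelNodeParams p;
+    memset(&p, 0, sizeof(p));
+    p.func           = (void *)myKernel;
+    p.gridDim        = dim3((n + 255) / 256);
+    p.blockDim       = dim3(256);
+    p.sharedMemBytes = 0;
+    p.kernelParams   = args;
+    p.extra          = NULL;
+
+    cudaGraphKernelNodeSetParams(node, &p);  // 'node' was created with cudaGraphAddKernelNode
+ * \endcode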
+ * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorMemoryAllocation + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphAddKernelNode, + * ::cudaGraphKernelNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeSetParams(cudaGraphNode_t node, const struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Copies attributes from source node to destination node. + * + * Copies attributes from source node \p src to destination node \p dst. + * Both node must have the same context. + * + * \param[out] dst Destination node + * \param[in] src Source node + * For list of attributes see ::cudaKernelNodeAttrID + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidContext + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeCopyAttributes( + cudaGraphNode_t hSrc, + cudaGraphNode_t hDst); + +/** + * \brief Queries node attribute. + * + * Queries attribute \p attr from node \p hNode and stores it in corresponding + * member of \p value_out. + * + * \param[in] hNode + * \param[in] attr + * \param[out] value_out + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeGetAttribute( + cudaGraphNode_t hNode, + cudaKernelNodeAttrID attr, + cudaKernelNodeAttrValue *value_out); + +/** + * \brief Sets node attribute. + * + * Sets attribute \p attr on node \p hNode from corresponding attribute of + * \p value. + * + * \param[out] hNode + * \param[in] attr + * \param[out] value + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeSetAttribute( + cudaGraphNode_t hNode, + cudaKernelNodeAttrID attr, + const cudaKernelNodeAttrValue *value); + +/** + * \brief Creates a memcpy node and adds it to a graph + * + * Creates a new memcpy node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will perform the memcpy described by \p pCopyParams. + * See ::cudaMemcpy3D() for a description of the structure and its restrictions. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
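+ *
+ * For illustration, a small sketch that adds a host-to-device copy of \c nbytes linear
+ * bytes expressed through ::cudaMemcpy3DParms (\c graph, \c h_src, \c d_dst and \c nbytes
+ * are assumptions):
+ * \code
+    struct cudaMemcpy3DParms copyParams = {0};
+    copyParams.srcPtr = make_cudaPitchedPtr(h_src, nbytes, nbytes, 1); // host buffer
+    copyParams.dstPtr = make_cudaPitchedPtr(d_dst, nbytes, nbytes, 1); // device buffer
+    copyParams.extent = make_cudaExtent(nbytes, 1, 1);                 // width in bytes for linear memory
+    copyParams.kind   = cudaMemcpyHostToDevice;
+
+    cudaGraphNode_t copyNode;
+    cudaGraphAddMemcpyNode(&copyNode, graph, NULL, 0, &copyParams);    // no dependencies: a root node
+ * \endcode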
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pCopyParams - Parameters for the memory copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy3D, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphAddMemcpyNode1D, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaMemcpy3DParms *pCopyParams); + +/** + * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
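+ *
+ * For illustration, a minimal sketch (the \c __device__ symbol, the host buffer and
+ * \c graph are assumptions):
+ * \code
+    // At file scope: __device__ float d_coeffs[16];
+    float h_coeffs[16] = {0};   // fill with the real coefficient values
+
+    cudaGraphNode_t toSymbolNode;
+    cudaGraphAddMemcpyNodeToSymbol(&toSymbolNode, graph, NULL, 0,
+                                   d_coeffs, h_coeffs, sizeof(h_coeffs), 0,
+                                   cudaMemcpyHostToDevice);
+ * \endcode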
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNodeToSymbol( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + const void* symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNodeFromSymbol( + cudaGraphNode_t* pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t* pDependencies, + size_t numDependencies, + void* dst, + const void* symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Creates a 1D memcpy node and adds it to a graph + * + * Creates a new 1D memcpy node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. Launching a + * memcpy node with dst and src pointers that do not match the direction of + * the copy results in an undefined behavior. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
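+ *
+ * A minimal sketch, assuming \p graph, a device buffer \p d_dst, a host buffer
+ * \p h_src and a byte count \p numBytes defined elsewhere:
+ * \code
+ * cudaGraphNode_t copyNode;
+ * cudaGraphAddMemcpyNode1D(&copyNode, graph, NULL, 0,
+ *                          d_dst, h_src, numBytes, cudaMemcpyHostToDevice);
+ * \endcode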
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParams1D, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNode1D( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + void* dst, + const void* src, + size_t count, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Returns a memcpy node's parameters + * + * Returns the parameters of memcpy node \p node in \p pNodeParams. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy3D, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeGetParams(cudaGraphNode_t node, struct cudaMemcpy3DParms *pNodeParams); + +/** + * \brief Sets a memcpy node's parameters + * + * Sets the parameters of memcpy node \p node to \p pNodeParams. + * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy3D, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParams1D, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParams(cudaGraphNode_t node, const struct cudaMemcpy3DParms *pNodeParams); + +/** + * \brief Sets a memcpy node's parameters to copy to a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. 
+ * + * \param node - Node to set the parameters for + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParamsToSymbol( + cudaGraphNode_t node, + const void* symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets a memcpy node's parameters to copy from a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParamsFromSymbol( + cudaGraphNode_t node, + void* dst, + const void* symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets a memcpy node's parameters to perform a 1-dimensional copy + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. Launching a + * memcpy node with dst and src pointers that do not match the direction of + * the copy results in an undefined behavior. 
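+ *
+ * A minimal sketch of re-targeting an existing 1D memcpy node (here called
+ * \p copyNode) at a different, equally sized source buffer \p h_src2; all
+ * names are placeholders assumed to exist elsewhere:
+ * \code
+ * cudaGraphMemcpyNodeSetParams1D(copyNode, d_dst, h_src2,
+ *                                numBytes, cudaMemcpyHostToDevice);
+ * \endcode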
+ * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParams1D( + cudaGraphNode_t node, + void* dst, + const void* src, + size_t count, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Creates a memset node and adds it to a graph + * + * Creates a new memset node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * The element size must be 1, 2, or 4 bytes. + * When the graph is launched, the node will perform the memset described by \p pMemsetParams. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pMemsetParams - Parameters for the memory set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemset2D, + * ::cudaGraphMemsetNodeGetParams, + * ::cudaGraphMemsetNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemsetNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaMemsetParams *pMemsetParams); + +/** + * \brief Returns a memset node's parameters + * + * Returns the parameters of memset node \p node in \p pNodeParams. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemset2D, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphMemsetNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemsetNodeGetParams(cudaGraphNode_t node, struct cudaMemsetParams *pNodeParams); + +/** + * \brief Sets a memset node's parameters + * + * Sets the parameters of memset node \p node to \p pNodeParams. 
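+ *
+ * A minimal sketch that zero-fills a linear device buffer of \p N 4-byte
+ * elements; \p memsetNode and \p d_dst are placeholders assumed to exist:
+ * \code
+ * struct cudaMemsetParams msParams = {0};
+ * msParams.dst         = d_dst;
+ * msParams.value       = 0;
+ * msParams.elementSize = 4;   // must be 1, 2, or 4
+ * msParams.width       = N;   // elements per row
+ * msParams.height      = 1;   // single row; pitch is unused when height is 1
+ * cudaGraphMemsetNodeSetParams(memsetNode, &msParams);
+ * \endcode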
+ * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemset2D, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphMemsetNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemsetNodeSetParams(cudaGraphNode_t node, const struct cudaMemsetParams *pNodeParams); + +/** + * \brief Creates a host execution node and adds it to a graph + * + * Creates a new CPU execution node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and arguments specified in \p pNodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will invoke the specified CPU function. + * Host nodes are not supported under MPS with pre-Volta GPUs. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pNodeParams - Parameters for the host node + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchHostFunc, + * ::cudaGraphHostNodeGetParams, + * ::cudaGraphHostNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddHostNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Returns a host node's parameters + * + * Returns the parameters of host node \p node in \p pNodeParams. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchHostFunc, + * ::cudaGraphAddHostNode, + * ::cudaGraphHostNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphHostNodeGetParams(cudaGraphNode_t node, struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Sets a host node's parameters + * + * Sets the parameters of host node \p node to \p nodeParams. + * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchHostFunc, + * ::cudaGraphAddHostNode, + * ::cudaGraphHostNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphHostNodeSetParams(cudaGraphNode_t node, const struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Creates a child graph node and adds it to a graph + * + * Creates a new node which executes an embedded graph, and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. 
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * If \p hGraph contains allocation or free nodes, this call will return an error. + * + * The node executes an embedded child graph. The child graph is cloned in this call. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param childGraph - The graph to clone into this node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphChildGraphNodeGetGraph, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddChildGraphNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, cudaGraph_t childGraph); + +/** + * \brief Gets a handle to the embedded graph of a child graph node + * + * Gets a handle to the embedded graph in a child graph node. This call + * does not clone the graph. Changes to the graph will be reflected in + * the node, and the node retains ownership of the graph. + * + * Allocation and free nodes cannot be added to the returned graph. + * Attempting to do so will return an error. + * + * \param node - Node to get the embedded graph for + * \param pGraph - Location to store a handle to the graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphNodeFindInClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphChildGraphNodeGetGraph(cudaGraphNode_t node, cudaGraph_t *pGraph); + +/** + * \brief Creates an empty node and adds it to a graph + * + * Creates a new node which performs no operation, and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * An empty node performs no operation during execution, but can be used for + * transitive ordering. For example, a phased execution graph with 2 groups of n + * nodes with a barrier between them can be represented using an empty node and + * 2*n dependency edges, rather than no empty node and n^2 dependency edges. 
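+ *
+ * A minimal sketch of the barrier pattern described above; \p groupA,
+ * \p groupB and their sizes are placeholder arrays of previously created
+ * nodes:
+ * \code
+ * cudaGraphNode_t barrier;
+ * cudaGraphAddEmptyNode(&barrier, graph, groupA, numA);
+ *
+ * // Order every node of the second group after the barrier.
+ * for (size_t i = 0; i < numB; ++i) {
+ *     cudaGraphAddDependencies(graph, &barrier, &groupB[i], 1);
+ * }
+ * \endcode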
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddEmptyNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies); + +/** + * \brief Creates an event record node and adds it to a graph + * + * Creates a new event record node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * Each launch of the graph will record \p event to capture execution of the + * node's dependencies. + * + * These nodes may not be used in loops or conditionals. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddEventRecordNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, cudaEvent_t event); +#endif + +/** + * \brief Returns the event associated with an event record node + * + * Returns the event of event record node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphEventRecordNodeSetEvent, + * ::cudaGraphEventWaitNodeGetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventRecordNodeGetEvent(cudaGraphNode_t node, cudaEvent_t *event_out); +#endif + +/** + * \brief Sets an event record node's event + * + * Sets the event of event record node \p hNode to \p event. 
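+ *
+ * A minimal sketch, assuming \p graph exists and \p otherEvent was created
+ * elsewhere:
+ * \code
+ * cudaEvent_t ev;
+ * cudaEventCreateWithFlags(&ev, cudaEventDisableTiming);
+ *
+ * cudaGraphNode_t recordNode;
+ * cudaGraphAddEventRecordNode(&recordNode, graph, NULL, 0, ev);
+ *
+ * // Later, point the node at a different event without rebuilding the graph.
+ * cudaGraphEventRecordNodeSetEvent(recordNode, otherEvent);
+ * \endcode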
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphEventRecordNodeGetEvent, + * ::cudaGraphEventWaitNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventRecordNodeSetEvent(cudaGraphNode_t node, cudaEvent_t event); +#endif + +/** + * \brief Creates an event wait node and adds it to a graph + * + * Creates a new event wait node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The graph node will wait for all work captured in \p event. See ::cuEventRecord() + * for details on what is captured by an event. The synchronization will be performed + * efficiently on the device when applicable. \p event may be from a different context + * or device than the launch stream. + * + * These nodes may not be used in loops or conditionals. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddEventWaitNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, cudaEvent_t event); +#endif + +/** + * \brief Returns the event associated with an event wait node + * + * Returns the event of event wait node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphEventWaitNodeSetEvent, + * ::cudaGraphEventRecordNodeGetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventWaitNodeGetEvent(cudaGraphNode_t node, cudaEvent_t *event_out); +#endif + +/** + * \brief Sets an event wait node's event + * + * Sets the event of event wait node \p hNode to \p event. 
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphEventWaitNodeGetEvent, + * ::cudaGraphEventRecordNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventWaitNodeSetEvent(cudaGraphNode_t node, cudaEvent_t event); +#endif + +/** + * \brief Creates an external semaphore signal node and adds it to a graph + * + * Creates a new external semaphore signal node and adds it to \p graph with \p + * numDependencies dependencies specified via \p dependencies and arguments specified + * in \p nodeParams. It is possible for \p numDependencies to be 0, in which case the + * node will be placed at the root of the graph. \p dependencies may not have any + * duplicate entries. A handle to the new node will be returned in \p pGraphNode. + * + * Performs a signal operation on a set of externally allocated semaphore objects + * when the node is launched. The operation(s) will occur after all of the node's + * dependencies have completed. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphExternalSemaphoresSignalNodeGetParams, + * ::cudaGraphExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddExternalSemaphoresSignalNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaExternalSemaphoreSignalNodeParams *nodeParams); +#endif + +/** + * \brief Returns an external semaphore signal node's parameters + * + * Returns the parameters of an external semaphore signal node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cudaGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. 
+ * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaGraphExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresSignalNodeGetParams(cudaGraphNode_t hNode, struct cudaExternalSemaphoreSignalNodeParams *params_out); +#endif + +/** + * \brief Sets an external semaphore signal node's parameters + * + * Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaGraphExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresSignalNodeSetParams(cudaGraphNode_t hNode, const struct cudaExternalSemaphoreSignalNodeParams *nodeParams); +#endif + +/** + * \brief Creates an external semaphore wait node and adds it to a graph + * + * Creates a new external semaphore wait node and adds it to \p graph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p pGraphNode. + * + * Performs a wait operation on a set of externally allocated semaphore objects + * when the node is launched. The node's dependencies will not be launched until + * the wait operation has completed. 
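+ *
+ * A minimal sketch for a single binary semaphore; \p extSem is assumed to have
+ * been obtained via ::cudaImportExternalSemaphore, and both parameter
+ * structures are simply zero-initialized here:
+ * \code
+ * struct cudaExternalSemaphoreWaitParams waitParams;
+ * memset(&waitParams, 0, sizeof(waitParams));
+ *
+ * struct cudaExternalSemaphoreWaitNodeParams nodeParams;
+ * memset(&nodeParams, 0, sizeof(nodeParams));
+ * nodeParams.extSemArray = &extSem;
+ * nodeParams.paramsArray = &waitParams;
+ * nodeParams.numExtSems  = 1;
+ *
+ * cudaGraphNode_t waitNode;
+ * cudaGraphAddExternalSemaphoresWaitNode(&waitNode, graph, NULL, 0, &nodeParams);
+ * \endcode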
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphExternalSemaphoresWaitNodeGetParams, + * ::cudaGraphExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddExternalSemaphoresWaitNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaExternalSemaphoreWaitNodeParams *nodeParams); +#endif + +/** + * \brief Returns an external semaphore wait node's parameters + * + * Returns the parameters of an external semaphore wait node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cudaGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. + * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresWaitNodeGetParams(cudaGraphNode_t hNode, struct cudaExternalSemaphoreWaitNodeParams *params_out); +#endif + +/** + * \brief Sets an external semaphore wait node's parameters + * + * Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams. 
+ * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresWaitNodeSetParams(cudaGraphNode_t hNode, const struct cudaExternalSemaphoreWaitNodeParams *nodeParams); +#endif + +/** + * \brief Creates an allocation node and adds it to a graph + * + * Creates a new allocation node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p pGraphNode. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * When ::cudaGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in + * \p nodeParams.dptr. The allocation's address remains fixed across instantiations and launches. + * + * If the allocation is freed in the same graph, by creating a free node using ::cudaGraphAddMemFreeNode, + * the allocation can be accessed by nodes ordered after the allocation node but before the free node. + * These allocations cannot be freed outside the owning graph, and they can only be freed once in the + * owning graph. + * + * If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the + * graph which are ordered after the allocation node, but also by stream operations ordered after the + * graph's execution but before the allocation is freed. + * + * Allocations which are not freed in the same graph can be freed by: + * - passing the allocation to ::cudaMemFreeAsync or ::cudaMemFree; + * - launching a graph with a free node for that allocation; or + * - specifying ::cudaGraphInstantiateFlagAutoFreeOnLaunch during instantiation, which makes + * each launch behave as though it called ::cudaMemFreeAsync for every unfreed allocation. + * + * It is not possible to free an allocation in both the owning graph and another graph. If the allocation + * is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed + * in another graph, a free node can no longer be added to the owning graph. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. 
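+ *
+ * A minimal sketch requesting \p bytes of device memory on device 0 and
+ * freeing it later in the same graph; \p graph and \p bytes are assumed to
+ * exist, and the consumer nodes are elided:
+ * \code
+ * struct cudaMemAllocNodeParams allocParams;
+ * memset(&allocParams, 0, sizeof(allocParams));
+ * allocParams.poolProps.allocType     = cudaMemAllocationTypePinned;
+ * allocParams.poolProps.location.type = cudaMemLocationTypeDevice;
+ * allocParams.poolProps.location.id   = 0;
+ * allocParams.bytesize                = bytes;
+ *
+ * cudaGraphNode_t allocNode;
+ * cudaGraphAddMemAllocNode(&allocNode, graph, NULL, 0, &allocParams);
+ * void *dptr = allocParams.dptr;   // the allocation's address is returned here
+ *
+ * // ... nodes that use dptr go here, ordered after allocNode ...
+ *
+ * // In practice the free node should depend on every node that uses dptr.
+ * cudaGraphNode_t freeNode;
+ * cudaGraphAddMemFreeNode(&freeNode, graph, &allocNode, 1, dptr);
+ * \endcode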
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorCudartUnloading, + * ::cudaErrorInitializationError, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cudaGraphAddMemFreeNode, + * ::cudaGraphMemAllocNodeGetParams, + * ::cudaDeviceGraphMemTrim, + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaMallocAsync, + * ::cudaFreeAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemAllocNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, struct cudaMemAllocNodeParams *nodeParams); +#endif + +/** + * \brief Returns a memory alloc node's parameters + * + * Returns the parameters of a memory alloc node \p hNode in \p params_out. + * The \p poolProps and \p accessDescs returned in \p params_out, are owned by the + * node. This memory remains valid until the node is destroyed. The returned + * parameters must not be modified. + * + * \param node - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphMemFreeNodeGetParams + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphMemAllocNodeGetParams(cudaGraphNode_t node, struct cudaMemAllocNodeParams *params_out); +#endif + +/** + * \brief Creates a memory free node and adds it to a graph + * + * Creates a new memory free node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and address specified in \p dptr. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p pGraphNode. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dptr - Address of memory to free + * + * ::cudaGraphAddMemFreeNode will return ::cudaErrorInvalidValue if the user attempts to free: + * - an allocation twice in the same graph. + * - an address that was not returned by an allocation node. + * - an invalid address. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. 
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorCudartUnloading, + * ::cudaErrorInitializationError, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphMemFreeNodeGetParams, + * ::cudaDeviceGraphMemTrim, + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaMallocAsync, + * ::cudaFreeAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemFreeNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, void *dptr); +#endif + +/** + * \brief Returns a memory free node's parameters + * + * Returns the address of a memory free node \p hNode in \p dptr_out. + * + * \param node - Node to get the parameters for + * \param dptr_out - Pointer to return the device address + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemFreeNode, + * ::cudaGraphMemFreeNodeGetParams + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphMemFreeNodeGetParams(cudaGraphNode_t node, void *dptr_out); +#endif + +/** + * \brief Free unused memory that was cached on the specified device for use with graphs back to the OS. + * + * Blocks which are not in use by a graph that is either currently executing or scheduled to execute are + * freed back to the operating system. + * + * \param device - The device for which cached memory should be freed. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphAddMemFreeNode, + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaMallocAsync, + * ::cudaFreeAsync + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaDeviceGraphMemTrim(int device); +#endif + +/** + * \brief Query asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::cudaGraphMemAttrUsedMemCurrent: Amount of memory, in bytes, currently associated with graphs + * - ::cudaGraphMemAttrUsedMemHigh: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + * - ::cudaGraphMemAttrReservedMemCurrent: Amount of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * - ::cudaGraphMemAttrReservedMemHigh: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. 
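+ *
+ * A minimal sketch querying the current graph-owned footprint on device 0;
+ * reading the value into a size_t is an assumption of this sketch rather than
+ * a statement about the attribute's exact value type:
+ * \code
+ * size_t usedMem = 0;
+ * cudaDeviceGetGraphMemAttribute(0, cudaGraphMemAttrUsedMemCurrent, &usedMem);
+ * \endcode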
+ * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - retrieved value + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphAddMemFreeNode, + * ::cudaDeviceGraphMemTrim, + * ::cudaMallocAsync, + * ::cudaFreeAsync + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetGraphMemAttribute(int device, enum cudaGraphMemAttributeType attr, void* value); +#endif + +/** + * \brief Set asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::cudaGraphMemAttrUsedMemHigh: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + * - ::cudaGraphMemAttrReservedMemHigh: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - pointer to value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphAddMemFreeNode, + * ::cudaDeviceGraphMemTrim, + * ::cudaMallocAsync, + * ::cudaFreeAsync + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetGraphMemAttribute(int device, enum cudaGraphMemAttributeType attr, void* value); +#endif + +/** + * \brief Clones a graph + * + * This function creates a copy of \p originalGraph and returns it in \p pGraphClone. + * All parameters are copied into the cloned graph. The original graph may be modified + * after this call without affecting the clone. + * + * Child graph nodes in the original graph are recursively copied into the clone. + * + * \param pGraphClone - Returns newly created cloned graph + * \param originalGraph - Graph to clone + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphNodeFindInClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphClone(cudaGraph_t *pGraphClone, cudaGraph_t originalGraph); + +/** + * \brief Finds a cloned version of a node + * + * This function returns the node in \p clonedGraph corresponding to \p originalNode + * in the original graph. + * + * \p clonedGraph must have been cloned from \p originalGraph via ::cudaGraphClone. + * \p originalNode must have been in \p originalGraph at the time of the call to + * ::cudaGraphClone, and the corresponding cloned node in \p clonedGraph must not have + * been removed. The cloned node is then returned via \p pClonedNode. 
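+ *
+ * A minimal sketch, assuming \p originalGraph and one of its kernel nodes
+ * \p kernelNode exist elsewhere:
+ * \code
+ * cudaGraph_t clonedGraph;
+ * cudaGraphClone(&clonedGraph, originalGraph);
+ *
+ * // Locate the clone's copy of the node so its parameters can be adjusted.
+ * cudaGraphNode_t clonedKernelNode;
+ * cudaGraphNodeFindInClone(&clonedKernelNode, kernelNode, clonedGraph);
+ * \endcode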
+ * + * \param pNode - Returns handle to the cloned node + * \param originalNode - Handle to the original node + * \param clonedGraph - Cloned graph to query + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeFindInClone(cudaGraphNode_t *pNode, cudaGraphNode_t originalNode, cudaGraph_t clonedGraph); + +/** + * \brief Returns a node's type + * + * Returns the node type of \p node in \p pType. + * + * \param node - Node to query + * \param pType - Pointer to return the node type + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphChildGraphNodeGetGraph, + * ::cudaGraphKernelNodeGetParams, + * ::cudaGraphKernelNodeSetParams, + * ::cudaGraphHostNodeGetParams, + * ::cudaGraphHostNodeSetParams, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemsetNodeGetParams, + * ::cudaGraphMemsetNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetType(cudaGraphNode_t node, enum cudaGraphNodeType *pType); + +/** + * \brief Returns a graph's nodes + * + * Returns a list of \p graph's nodes. \p nodes may be NULL, in which case this + * function will return the number of nodes in \p numNodes. Otherwise, + * \p numNodes entries will be filled in. If \p numNodes is higher than the actual + * number of nodes, the remaining entries in \p nodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numNodes. + * + * \param graph - Graph to query + * \param nodes - Pointer to return the nodes + * \param numNodes - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetType, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphGetNodes(cudaGraph_t graph, cudaGraphNode_t *nodes, size_t *numNodes); + +/** + * \brief Returns a graph's root nodes + * + * Returns a list of \p graph's root nodes. \p pRootNodes may be NULL, in which case this + * function will return the number of root nodes in \p pNumRootNodes. Otherwise, + * \p pNumRootNodes entries will be filled in. If \p pNumRootNodes is higher than the actual + * number of root nodes, the remaining entries in \p pRootNodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p pNumRootNodes. + * + * \param graph - Graph to query + * \param pRootNodes - Pointer to return the root nodes + * \param pNumRootNodes - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphGetNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetType, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphGetRootNodes(cudaGraph_t graph, cudaGraphNode_t *pRootNodes, size_t *pNumRootNodes); + +/** + * \brief Returns a graph's dependency edges + * + * Returns a list of \p graph's dependency edges. 
Edges are returned via corresponding + * indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the + * node in \p from[i]. \p from and \p to may both be NULL, in which + * case this function only returns the number of edges in \p numEdges. Otherwise, + * \p numEdges entries will be filled in. If \p numEdges is higher than the actual + * number of edges, the remaining entries in \p from and \p to will be set to NULL, and + * the number of edges actually returned will be written to \p numEdges. + * + * \param graph - Graph to get the edges from + * \param from - Location to return edge endpoints + * \param to - Location to return edge endpoints + * \param numEdges - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphAddDependencies, + * ::cudaGraphRemoveDependencies, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphGetEdges(cudaGraph_t graph, cudaGraphNode_t *from, cudaGraphNode_t *to, size_t *numEdges); + +/** + * \brief Returns a node's dependencies + * + * Returns a list of \p node's dependencies. \p pDependencies may be NULL, in which case this + * function will return the number of dependencies in \p pNumDependencies. Otherwise, + * \p pNumDependencies entries will be filled in. If \p pNumDependencies is higher than the actual + * number of dependencies, the remaining entries in \p pDependencies will be set to NULL, and the + * number of nodes actually obtained will be returned in \p pNumDependencies. + * + * \param node - Node to query + * \param pDependencies - Pointer to return the dependencies + * \param pNumDependencies - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeGetDependentNodes, + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphAddDependencies, + * ::cudaGraphRemoveDependencies + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetDependencies(cudaGraphNode_t node, cudaGraphNode_t *pDependencies, size_t *pNumDependencies); + +/** + * \brief Returns a node's dependent nodes + * + * Returns a list of \p node's dependent nodes. \p pDependentNodes may be NULL, in which + * case this function will return the number of dependent nodes in \p pNumDependentNodes. + * Otherwise, \p pNumDependentNodes entries will be filled in. If \p pNumDependentNodes is + * higher than the actual number of dependent nodes, the remaining entries in + * \p pDependentNodes will be set to NULL, and the number of nodes actually obtained will + * be returned in \p pNumDependentNodes. 
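+ *
+ * The query functions above share a two-call pattern: pass NULL first to
+ * obtain the count, then call again with a buffer of that size. A minimal
+ * sketch using ::cudaGraphGetNodes, with \p graph assumed to exist:
+ * \code
+ * size_t numNodes = 0;
+ * cudaGraphGetNodes(graph, NULL, &numNodes);
+ *
+ * cudaGraphNode_t *nodes = (cudaGraphNode_t *)malloc(numNodes * sizeof(cudaGraphNode_t));
+ * cudaGraphGetNodes(graph, nodes, &numNodes);
+ * // ... inspect nodes ...
+ * free(nodes);
+ * \endcode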
+ * + * \param node - Node to query + * \param pDependentNodes - Pointer to return the dependent nodes + * \param pNumDependentNodes - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphAddDependencies, + * ::cudaGraphRemoveDependencies + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetDependentNodes(cudaGraphNode_t node, cudaGraphNode_t *pDependentNodes, size_t *pNumDependentNodes); + +/** + * \brief Adds dependency edges to a graph. + * + * The number of dependencies to be added is defined by \p numDependencies + * Elements in \p pFrom and \p pTo at corresponding indices define a dependency. + * Each node in \p pFrom and \p pTo must belong to \p graph. + * + * If \p numDependencies is 0, elements in \p pFrom and \p pTo will be ignored. + * Specifying an existing dependency will return an error. + * + * \param graph - Graph to which dependencies are added + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be added + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphRemoveDependencies, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddDependencies(cudaGraph_t graph, const cudaGraphNode_t *from, const cudaGraphNode_t *to, size_t numDependencies); + +/** + * \brief Removes dependency edges from a graph. + * + * The number of \p pDependencies to be removed is defined by \p numDependencies. + * Elements in \p pFrom and \p pTo at corresponding indices define a dependency. + * Each node in \p pFrom and \p pTo must belong to \p graph. + * + * If \p numDependencies is 0, elements in \p pFrom and \p pTo will be ignored. + * Specifying a non-existing dependency will return an error. + * + * \param graph - Graph from which to remove dependencies + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be removed + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddDependencies, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphRemoveDependencies(cudaGraph_t graph, const cudaGraphNode_t *from, const cudaGraphNode_t *to, size_t numDependencies); + +/** + * \brief Remove a node from the graph + * + * Removes \p node from its graph. This operation also severs any dependencies of other nodes + * on \p node and vice versa. + * + * Dependencies cannot be removed from graphs which contain allocation or free nodes. + * Any attempt to do so will return an error. 
+ * + * \param node - Node to remove + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphDestroyNode(cudaGraphNode_t node); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * The \p flags parameter controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::cudaGraphInstantiateFlagDeviceLaunch, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag cannot be used in + * conjunction with ::cudaGraphInstantiateFlagAutoFreeOnLaunch. + * + * - ::cudaGraphInstantiateFlagUseNodePriority, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p graph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to + * instantiate a second executable graph before destroying the first with + * ::cudaGraphExecDestroy will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single device. + * + * - The graph can only contain kernel nodes. Furthermore, use of CUDA Dynamic Parallelism + * is not permitted. Cooperative launches are permitted as long as MPS is not in use. + * + * If \p graph is not instantiated for launch on the device but contains kernels which + * call device-side cudaGraphLaunch() from multiple devices, this will result in an error. + * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiate(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, unsigned long long flags __dv(0)); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. 
If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * The \p flags parameter controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::cudaGraphInstantiateFlagDeviceLaunch, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. This flag cannot be used in + * conjunction with ::cudaGraphInstantiateFlagAutoFreeOnLaunch. + * + * - ::cudaGraphInstantiateFlagUseNodePriority, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p graph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to + * instantiate a second executable graph before destroying the first with + * ::cudaGraphExecDestroy will result in an error. + * + * If \p graph contains kernels which call device-side cudaGraphLaunch() from multiple + * devices, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single device. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current device, and the current device must + * match the device of other nodes in the graph. + * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiateWithFlags(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, unsigned long long flags __dv(0)); +#endif + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph according to the \p instantiateParams structure. + * The graph is validated for any structural constraints or intra-node constraints + * which were not previously validated. If instantiation is successful, a handle to + * the instantiated graph is returned in \p pGraphExec. 
+ * + * \p instantiateParams controls the behavior of instantiation and subsequent + * graph launches, as well as returning more detailed information in the event of an error. + * ::cudaGraphInstantiateParams is defined as: + * + * \code + typedef struct { + unsigned long long flags; + cudaStream_t uploadStream; + cudaGraphNode_t errNode_out; + cudaGraphInstantiateResult result_out; + } cudaGraphInstantiateParams; + * \endcode + * + * The \p flags field controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::cudaGraphInstantiateFlagUpload, which will perform an upload of the graph + * into \p uploadStream once the graph has been instantiated. + * + * - ::cudaGraphInstantiateFlagDeviceLaunch, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. This flag cannot be used in + * conjunction with ::cudaGraphInstantiateFlagAutoFreeOnLaunch. + * + * - ::cudaGraphInstantiateFlagUseNodePriority, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p graph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to instantiate a + * second executable graph before destroying the first with ::cudaGraphExecDestroy will + * result in an error. + * + * If \p graph contains kernels which call device-side cudaGraphLaunch() from multiple + * devices, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single device. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current device, and the current device must + * match the device of other nodes in the graph. + * + * In the event of an error, the \p result_out and \p errNode_out fields will contain more + * information about the nature of the error. Possible error reporting includes: + * + * - ::cudaGraphInstantiateError, if passed an invalid value or if an unexpected error occurred + * which is described by the return value of the function. \p errNode_out will be set to NULL. + * - ::cudaGraphInstantiateInvalidStructure, if the graph structure is invalid. \p errNode_out + * will be set to one of the offending nodes. 
+ * - ::cudaGraphInstantiateNodeOperationNotSupported, if the graph is instantiated for device + * launch but contains a node of an unsupported node type, or a node which performs unsupported + * operations, such as use of CUDA dynamic parallelism within a kernel node. \p errNode_out will + * be set to this node. + * - ::cudaGraphInstantiateMultipleDevicesNotSupported, if the graph is instantiated for device + * launch but a node’s device differs from that of another node. This error can also be returned + * if a graph is not instantiated for device launch and it contains kernels which call device-side + * cudaGraphLaunch() from multiple devices. \p errNode_out will be set to this node. + * + * If instantiation is successful, \p result_out will be set to ::cudaGraphInstantiateSuccess, + * and \p hErrNode_out will be set to NULL. + * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param instantiateParams - Instantiation parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphInstantiate, + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphExecDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiateWithParams(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, cudaGraphInstantiateParams *instantiateParams); + +/** + * \brief Query the instantiation flags of an executable graph + * + * Returns the flags that were passed to instantiation for the given executable graph. + * ::cudaGraphInstantiateFlagUpload will not be returned by this API as it does + * not affect the resulting executable graph. + * + * \param graphExec - The executable graph to query + * \param flags - Returns the instantiation flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphInstantiateWithParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecGetFlags(cudaGraphExec_t graphExec, unsigned long long *flags); + +/** + * \brief Sets the parameters for a kernel node in the given graphExec + * + * Sets the parameters of a kernel node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p node in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. All \p nodeParams + * fields may change, but the following restrictions apply to \p func updates: + * + * - The owning device of the function cannot change. + * - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + * to a function which uses CDP + * - If \p hGraphExec was not instantiated for device launch, a node whose function originally + * did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + * device-side cudaGraphLaunch() unless the node resides on the same device as nodes which + * contained such calls at instantiate-time. If no such calls were present at instantiation, + * these updates cannot be performed at all. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p node is also not modified by this call. 
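+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA documentation):
+ * re-pointing a kernel node's argument in an already-instantiated graph. It assumes
+ * \p kernelNode was added with \p origParams, \p hGraphExec was instantiated from that
+ * graph, and \p newBuf is a device pointer of the type the kernel expects.
+ * \code
+ struct cudaKernelNodeParams params = origParams;  // start from the instantiation-time params
+ void *args[] = { &newBuf };
+ params.kernelParams = args;                       // same func/grid/block, different argument
+ cudaGraphExecKernelNodeSetParams(hGraphExec, kernelNode, &params);
+ * \endcode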
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - kernel node from the graph from which graphExec was instantiated + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddKernelNode, + * ::cudaGraphKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecKernelNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec. + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained \p pNodeParams at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * The source and destination memory in \p pNodeParams must be allocated from the same + * contexts as the original source and destination memory. Both the instantiation-time + * memory operands and the memory operands in \p pNodeParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * either the original or new memory operands are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecMemcpyNodeSetParams1D, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaMemcpy3DParms *pNodeParams); + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. 
Changed edges to and from \p node are ignored. + * + * \p src and \p symbol must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParamsToSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + const void* symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p symbol and \p dst must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
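+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA documentation):
+ * redirecting the copy to a different destination of the same size, assuming the node was
+ * created as a device-to-host copy from the __device__ symbol \p d_coeffs, and that
+ * \p newDst and \p numBytes match the original destination and size.
+ * \code
+ cudaGraphExecMemcpyNodeSetParamsFromSymbol(hGraphExec, memcpyNode,
+                                            newDst, d_coeffs,
+                                            numBytes, 0, cudaMemcpyDeviceToHost);
+ * \endcode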
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParamsFromSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const void* symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to perform a 1-dimensional copy + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p src and \p dst must be allocated from the same contexts as the original source + * and destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
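+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA documentation):
+ * pointing the instantiated 1D copy at a new source buffer of the same size.
+ * \p hGraphExec, \p memcpyNode, \p d_dst, \p d_newSrc and \p numBytes are assumed to
+ * exist already.
+ * \code
+ cudaGraphExecMemcpyNodeSetParams1D(hGraphExec, memcpyNode,
+                                    d_dst, d_newSrc, numBytes,
+                                    cudaMemcpyDeviceToDevice);
+ * \endcode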
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNode1D, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParams1D, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParams1D( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const void* src, + size_t count, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets the parameters for a memset node in the given graphExec. + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained \p pNodeParams at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * The destination memory in \p pNodeParams must be allocated from the same + * context as the original destination memory. Both the instantiation-time + * memory operand and the memory operand in \p pNodeParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns cudaErrorInvalidValue if the memory operand's mappings changed or + * either the original or new memory operand are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memset node from the graph which was used to instantiate graphExec + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemsetNode, + * ::cudaGraphMemsetNodeSetParams, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemsetNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaMemsetParams *pNodeParams); + +/** + * \brief Sets the parameters for a host node in the given graphExec. 
+ * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained \p pNodeParams at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Host node from the graph which was used to instantiate graphExec + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddHostNode, + * ::cudaGraphHostNodeSetParams, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecHostNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Updates node parameters in the child graph node in the given graphExec. + * + * Updates the work represented by \p node in \p hGraphExec as though the nodes contained + * in \p node's graph had the parameters contained in \p childGraph's nodes at instantiation. + * \p node must remain in the graph which was used to instantiate \p hGraphExec. + * Changed edges to and from \p node are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * The topology of \p childGraph, as well as the node insertion order, must match that + * of the graph contained in \p node. See ::cudaGraphExecUpdate() for a list of restrictions + * on what can be updated in an instantiated graph. The update is recursive, so child graph + * nodes contained within the top level child graph will also be updated. 
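+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA documentation),
+ * assuming \p updatedChild was built elsewhere with the same topology and node insertion
+ * order as the graph originally embedded in \p childNode, differing only in node parameters:
+ * \code
+ cudaGraphExecChildGraphNodeSetParams(hGraphExec, childNode, updatedChild);
+ * \endcode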
+ *
+ * \param hGraphExec - The executable graph in which to set the specified node
+ * \param node - Child graph node from the graph from which graphExec was instantiated
+ * \param childGraph - The graph supplying the updated parameters
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphAddChildGraphNode,
+ * ::cudaGraphChildGraphNodeGetGraph,
+ * ::cudaGraphExecKernelNodeSetParams,
+ * ::cudaGraphExecMemcpyNodeSetParams,
+ * ::cudaGraphExecMemsetNodeSetParams,
+ * ::cudaGraphExecHostNodeSetParams,
+ * ::cudaGraphExecEventRecordNodeSetEvent,
+ * ::cudaGraphExecEventWaitNodeSetEvent,
+ * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams,
+ * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams,
+ * ::cudaGraphExecUpdate,
+ * ::cudaGraphInstantiate
+ */
+#if __CUDART_API_VERSION >= 11010
+ extern __host__ cudaError_t CUDARTAPI cudaGraphExecChildGraphNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, cudaGraph_t childGraph);
+#endif
+
+/**
+ * \brief Sets the event for an event record node in the given graphExec
+ *
+ * Sets the event of an event record node in an executable graph \p hGraphExec.
+ * The node is identified by the corresponding node \p hNode in the
+ * non-executable graph, from which the executable graph was instantiated.
+ *
+ * The modifications only affect future launches of \p hGraphExec. Already
+ * enqueued or running launches of \p hGraphExec are not affected by this call.
+ * \p hNode is also not modified by this call.
+ *
+ * \param hGraphExec - The executable graph in which to set the specified node
+ * \param hNode - Event record node from the graph from which graphExec was instantiated
+ * \param event - Updated event to use
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphAddEventRecordNode,
+ * ::cudaGraphEventRecordNodeGetEvent,
+ * ::cudaGraphEventWaitNodeSetEvent,
+ * ::cudaEventRecordWithFlags,
+ * ::cudaStreamWaitEvent,
+ * ::cudaGraphExecKernelNodeSetParams,
+ * ::cudaGraphExecMemcpyNodeSetParams,
+ * ::cudaGraphExecMemsetNodeSetParams,
+ * ::cudaGraphExecHostNodeSetParams,
+ * ::cudaGraphExecChildGraphNodeSetParams,
+ * ::cudaGraphExecEventWaitNodeSetEvent,
+ * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams,
+ * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams,
+ * ::cudaGraphExecUpdate,
+ * ::cudaGraphInstantiate
+ */
+#if __CUDART_API_VERSION >= 11010
+ extern __host__ cudaError_t CUDARTAPI cudaGraphExecEventRecordNodeSetEvent(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, cudaEvent_t event);
+#endif
+
+/**
+ * \brief Sets the event for an event wait node in the given graphExec
+ *
+ * Sets the event of an event wait node in an executable graph \p hGraphExec.
+ * The node is identified by the corresponding node \p hNode in the
+ * non-executable graph, from which the executable graph was instantiated.
+ *
+ * The modifications only affect future launches of \p hGraphExec. Already
+ * enqueued or running launches of \p hGraphExec are not affected by this call.
+ * \p hNode is also not modified by this call.
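+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA documentation):
+ * making the instantiated wait node wait on a newly created event. \p hGraphExec and
+ * \p waitNode are assumed to exist already.
+ * \code
+ cudaEvent_t newEvent;
+ cudaEventCreate(&newEvent);
+ cudaGraphExecEventWaitNodeSetEvent(hGraphExec, waitNode, newEvent);
+ * \endcode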
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Event wait node from the graph from which graphExec was instantiated + * \param event - Updated event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphEventWaitNodeGetEvent, + * ::cudaGraphEventRecordNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecEventWaitNodeSetEvent(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, cudaEvent_t event); +#endif + +/** + * \brief Sets the parameters for an external semaphore signal node in the given graphExec + * + * Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore signal node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExecExternalSemaphoresSignalNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, const struct cudaExternalSemaphoreSignalNodeParams *nodeParams); +#endif + +/** + * \brief Sets the parameters for an external semaphore wait node in the given graphExec + * + * Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. 
Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore wait node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExecExternalSemaphoresWaitNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, const struct cudaExternalSemaphoreWaitNodeParams *nodeParams); +#endif + +/** + * \brief Enables or disables the specified node in the given graphExec + * + * Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent + * to empty nodes until they are reenabled. Existing node parameters are not affected by + * disabling/enabling the node. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \note Currently only kernel, memset and memcpy nodes are supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Node is enabled if != 0, otherwise the node is disabled + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeGetEnabled, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + * ::cudaGraphLaunch + */ +#if __CUDART_API_VERSION >= 11060 +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeSetEnabled(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, unsigned int isEnabled); +#endif + +/** + * \brief Query whether a node in the given graphExec is enabled + * + * Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * \note Currently only kernel, memset and memcpy nodes are supported. 
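+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA documentation):
+ * disabling a kernel node for one launch and restoring it afterwards. \p hGraphExec,
+ * \p kernelNode and \p stream are assumed to exist already.
+ * \code
+ unsigned int enabled = 1;
+ cudaGraphNodeSetEnabled(hGraphExec, kernelNode, 0);        // behaves like an empty node
+ cudaGraphNodeGetEnabled(hGraphExec, kernelNode, &enabled); // enabled is now 0
+ cudaGraphLaunch(hGraphExec, stream);                       // launch without that kernel
+ cudaGraphNodeSetEnabled(hGraphExec, kernelNode, 1);        // re-enable for later launches
+ * \endcode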
+ *
+ * \param hGraphExec - The executable graph in which to set the specified node
+ * \param hNode - Node from the graph from which graphExec was instantiated
+ * \param isEnabled - Location to return the enabled status of the node
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphNodeSetEnabled,
+ * ::cudaGraphExecUpdate,
+ * ::cudaGraphInstantiate
+ * ::cudaGraphLaunch
+ */
+#if __CUDART_API_VERSION >= 11060
+extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetEnabled(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, unsigned int *isEnabled);
+#endif
+
+/**
+ * \brief Check whether an executable graph can be updated with a graph and perform the update if possible
+ *
+ * Updates the node parameters in the instantiated graph specified by \p hGraphExec with the
+ * node parameters in a topologically identical graph specified by \p hGraph.
+ *
+ * Limitations:
+ *
+ * - Kernel nodes:
+ *   - The owning context of the function cannot change.
+ *   - A node whose function originally did not use CUDA dynamic parallelism cannot be updated
+ *     to a function which uses CDP.
+ *   - A cooperative node cannot be updated to a non-cooperative node, and vice-versa.
+ *   - If the graph was instantiated with cudaGraphInstantiateFlagUseNodePriority, the
+ *     priority attribute cannot change. Equality is checked on the originally requested
+ *     priority values, before they are clamped to the device's supported range.
+ *   - If \p hGraphExec was not instantiated for device launch, a node whose function originally
+ *     did not use device-side cudaGraphLaunch() cannot be updated to a function which uses
+ *     device-side cudaGraphLaunch() unless the node resides on the same device as nodes which
+ *     contained such calls at instantiate-time. If no such calls were present at instantiation,
+ *     these updates cannot be performed at all.
+ * - Memset and memcpy nodes:
+ *   - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change.
+ *   - The source/destination memory must be allocated from the same contexts as the original
+ *     source/destination memory.
+ *   - Only 1D memsets can be changed.
+ * - Additional memcpy node restrictions:
+ *   - Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE,
+ *     CU_MEMORYTYPE_ARRAY, etc.) is not supported.
+ *
+ * Note: The API may add further restrictions in future releases. The return code should always be checked.
+ *
+ * cudaGraphExecUpdate sets the result member of \p resultInfo to cudaGraphExecUpdateErrorTopologyChanged
+ * under the following conditions:
+ * - The count of nodes directly in \p hGraphExec and \p hGraph differ, in which case resultInfo->errorNode
+ *   is set to NULL.
+ * - \p hGraph has more exit nodes than \p hGraphExec, in which case resultInfo->errorNode is set to one of
+ *   the exit nodes in hGraph.
+ * - A node in \p hGraph has a different number of dependencies than the node from \p hGraphExec it is paired with,
+ *   in which case resultInfo->errorNode is set to the node from \p hGraph.
+ * - A node in \p hGraph has a dependency that does not match with the corresponding dependency of the paired node
+ *   from \p hGraphExec. resultInfo->errorNode will be set to the node from \p hGraph. resultInfo->errorFromNode
+ *   will be set to the mismatched dependency. The dependencies are paired based on edge order and a dependency
+ *   does not match when the nodes are already paired based on other edges examined in the graph.
+ *
+ * cudaGraphExecUpdate sets the result member of \p resultInfo to:
+ * - cudaGraphExecUpdateError if passed an invalid value.
+ * - cudaGraphExecUpdateErrorTopologyChanged if the graph topology changed
+ * - cudaGraphExecUpdateErrorNodeTypeChanged if the type of a node changed, in which case
+ *   resultInfo->errorNode is set to the node from \p hGraph.
+ * - cudaGraphExecUpdateErrorFunctionChanged if the function of a kernel node changed (CUDA driver < 11.2)
+ * - cudaGraphExecUpdateErrorUnsupportedFunctionChange if the func field of a kernel changed in an
+ *   unsupported way (see note above), in which case resultInfo->errorNode is set to the node from \p hGraph
+ * - cudaGraphExecUpdateErrorParametersChanged if any parameters to a node changed in a way
+ *   that is not supported, in which case resultInfo->errorNode is set to the node from \p hGraph
+ * - cudaGraphExecUpdateErrorAttributesChanged if any attributes of a node changed in a way
+ *   that is not supported, in which case resultInfo->errorNode is set to the node from \p hGraph
+ * - cudaGraphExecUpdateErrorNotSupported if something about a node is unsupported, like
+ *   the node's type or configuration, in which case resultInfo->errorNode is set to the node from \p hGraph
+ *
+ * If the update fails for a reason not listed above, the result member of \p resultInfo will be set
+ * to cudaGraphExecUpdateError. If the update succeeds, the result member will be set to cudaGraphExecUpdateSuccess.
+ *
+ * cudaGraphExecUpdate returns cudaSuccess when the update was performed successfully. It returns
+ * cudaErrorGraphExecUpdateFailure if the graph update was not performed because it included
+ * changes which violated constraints specific to instantiated graph update.
+ *
+ * \param hGraphExec The instantiated graph to be updated
+ * \param hGraph The graph containing the updated parameters
+ * \param resultInfo The error info structure
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorGraphExecUpdateFailure,
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphInstantiate
+ */
+extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphExecUpdateResultInfo *resultInfo);
+
+/**
+ * \brief Uploads an executable graph in a stream
+ *
+ * Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of
+ * the same \p hGraphExec will be serialized. Each upload is ordered behind both any
+ * previous work in \p hStream and any previous launches of \p hGraphExec.
+ * Uses memory cached by \p stream to back the allocations owned by \p graphExec.
+ *
+ * \param hGraphExec - Executable graph to upload
+ * \param hStream - Stream in which to upload the graph
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * \notefnerr
+ * \note_init_rt
+ *
+ * \sa
+ * ::cudaGraphInstantiate,
+ * ::cudaGraphLaunch,
+ * ::cudaGraphExecDestroy
+ */
+#if __CUDART_API_VERSION >= 11010
+ extern __host__ cudaError_t CUDARTAPI cudaGraphUpload(cudaGraphExec_t graphExec, cudaStream_t stream);
+#endif
+
+/**
+ * \brief Launches an executable graph in a stream
+ *
+ * Executes \p graphExec in \p stream. Only one instance of \p graphExec may be executing
+ * at a time. Each launch is ordered behind both any previous work in \p stream
+ * and any previous launches of \p graphExec.
To execute a graph concurrently, it must be + * instantiated multiple times into multiple executable graphs. + * + * If any allocations created by \p graphExec remain unfreed (from a previous launch) and + * \p graphExec was not instantiated with ::cudaGraphInstantiateFlagAutoFreeOnLaunch, + * the launch will fail with ::cudaErrorInvalidValue. + * + * \param graphExec - Executable graph to launch + * \param stream - Stream in which to launch the graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphUpload, + * ::cudaGraphExecDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream); + +/** + * \brief Destroys an executable graph + * + * Destroys the executable graph specified by \p graphExec. + * + * \param graphExec - Executable graph to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecDestroy(cudaGraphExec_t graphExec); + +/** + * \brief Destroys a graph + * + * Destroys the graph specified by \p graph, as well as all of its nodes. + * + * \param graph - Graph to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphDestroy(cudaGraph_t graph); + +/** + * \brief Write a DOT file describing graph structure + * + * Using the provided \p graph, write to \p path a DOT formatted description of the graph. + * By default this includes the graph topology, node types, node id, kernel names and memcpy direction. + * \p flags can be specified to write more detailed information about each node type such as + * parameter values, kernel attributes, node and function handles. + * + * \param graph - The graph to create a DOT file from + * \param path - The path to write the DOT file to + * \param flags - Flags from cudaGraphDebugDotFlags for specifying which additional node information to write + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorOperatingSystem + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphDebugDotPrint(cudaGraph_t graph, const char *path, unsigned int flags); + +/** + * \brief Create a user object + * + * Create a user object with the specified destructor callback and initial reference count. The + * initial references are owned by the caller. + * + * Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they + * are executed by a shared internal thread. Another thread may be signaled to perform such + * actions, if it does not block forward progress of tasks scheduled through CUDA. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object_out - Location to return the user object handle + * \param ptr - The pointer to pass to the destroy function + * \param destroy - Callback to free the user object when it is no longer in use + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. 
+ * \param flags - Currently it is required to pass ::cudaUserObjectNoDestructorSync, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectRetain, + * ::cudaUserObjectRelease, + * ::cudaGraphRetainUserObject, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaUserObjectCreate(cudaUserObject_t *object_out, void *ptr, cudaHostFn_t destroy, unsigned int initialRefcount, unsigned int flags); + +/** + * \brief Retain a reference to a user object + * + * Retains new references to a user object. The new references are owned by the caller. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to retain + * \param count - The number of references to retain, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate, + * ::cudaUserObjectRelease, + * ::cudaGraphRetainUserObject, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaUserObjectRetain(cudaUserObject_t object, unsigned int count __dv(1)); + +/** + * \brief Release a reference to a user object + * + * Releases user object references owned by the caller. The object's destructor is invoked if + * the reference count reaches zero. + * + * It is undefined behavior to release references not owned by the caller, or to use a user + * object handle after all references are released. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to release + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate, + * ::cudaUserObjectRetain, + * ::cudaGraphRetainUserObject, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaUserObjectRelease(cudaUserObject_t object, unsigned int count __dv(1)); + +/** + * \brief Retain a reference to a user object from a graph + * + * Creates or moves user object references that will be owned by a CUDA graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph to associate the reference with + * \param object - The user object to retain a reference for + * \param count - The number of references to add to the graph, typically 1. Must be + * nonzero and not larger than INT_MAX. + * \param flags - The optional flag ::cudaGraphUserObjectMove transfers references + * from the calling thread, rather than create new references. Pass 0 + * to create new references. 
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + * ::cudaUserObjectRetain, + * ::cudaUserObjectRelease, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphRetainUserObject(cudaGraph_t graph, cudaUserObject_t object, unsigned int count __dv(1), unsigned int flags __dv(0)); + +/** + * \brief Release a user object reference from a graph + * + * Releases user object references owned by a graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph that will release the reference + * \param object - The user object to release a reference for + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + * ::cudaUserObjectRetain, + * ::cudaUserObjectRelease, + * ::cudaGraphRetainUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphReleaseUserObject(cudaGraph_t graph, cudaUserObject_t object, unsigned int count __dv(1)); + +/** @} */ /* END CUDART_GRAPH */ + +/** + * \defgroup CUDART_DRIVER_ENTRY_POINT Driver Entry Point Access + * + * ___MANBRIEF___ driver entry point access functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the driver entry point access functions of CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Returns the requested driver API function pointer + * + * Returns in \p **funcPtr the address of the CUDA driver function for the requested flags. + * + * For a requested driver symbol, if the CUDA version in which the driver symbol was + * introduced is less than or equal to the CUDA runtime version, the API will return + * the function pointer to the corresponding versioned driver function. + * + * The pointer returned by the API should be cast to a function pointer matching the + * requested driver function's definition in the API header file. The function pointer + * typedef can be picked up from the corresponding typedefs header file. For example, + * cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h. + * + * The API will return ::cudaSuccess and set the returned \p funcPtr to NULL if the + * requested driver function is not supported on the platform, no ABI + * compatible driver function exists for the CUDA runtime version or if the + * driver symbol is invalid. + * + * It will also set the optional \p driverStatus to one of the values in + * ::cudaDriverEntryPointQueryResult with the following meanings: + * - ::cudaDriverEntryPointSuccess - The requested symbol was succesfully found based + * on input arguments and \p pfn is valid + * - ::cudaDriverEntryPointSymbolNotFound - The requested symbol was not found + * - ::cudaDriverEntryPointVersionNotSufficent - The requested symbol was found but is + * not supported by the current runtime version (CUDART_VERSION) + * + * The requested flags can be: + * - ::cudaEnableDefault: This is the default mode. This is equivalent to + * ::cudaEnablePerThreadDefaultStream if the code is compiled with + * --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM + * is defined; ::cudaEnableLegacyStream otherwise. 
+ * - ::cudaEnableLegacyStream: This will enable the search for all driver symbols + * that match the requested driver symbol name except the corresponding per-thread versions. + * - ::cudaEnablePerThreadDefaultStream: This will enable the search for all + * driver symbols that match the requested driver symbol name including the per-thread + * versions. If a per-thread version is not found, the API will return the legacy version + * of the driver function. + * + * \param symbol - The base name of the driver API function to look for. As an example, + * for the driver API ::cuMemAlloc_v2, \p symbol would be cuMemAlloc. + * Note that the API will use the CUDA runtime version to return the + * address to the most recent ABI compatible driver symbol, ::cuMemAlloc + * or ::cuMemAlloc_v2. + * \param funcPtr - Location to return the function pointer to the requested driver function + * \param flags - Flags to specify search options. + * \param driverStatus - Optional location to store the status of finding the symbol from + * the driver. See ::cudaDriverEntryPointQueryResult for + * possible values. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * \note_version_mixing + * \note_init_rt + * \note_callback + * + * \sa + * ::cuGetProcAddress + */ +#if defined(__cplusplus) +extern __host__ cudaError_t CUDARTAPI cudaGetDriverEntryPoint(const char *symbol, void **funcPtr, unsigned long long flags, enum cudaDriverEntryPointQueryResult *driverStatus = NULL); +#else +extern __host__ cudaError_t CUDARTAPI cudaGetDriverEntryPoint(const char *symbol, void **funcPtr, unsigned long long flags, enum cudaDriverEntryPointQueryResult *driverStatus); +#endif + +/** @} */ /* END CUDART_DRIVER_ENTRY_POINT */ + +/** \cond impl_private */ +extern __host__ cudaError_t CUDARTAPI cudaGetExportTable(const void **ppExportTable, const cudaUUID_t *pExportTableId); +/** \endcond impl_private */ + +/** + * \defgroup CUDART_HIGHLEVEL C++ API Routines + * + * ___MANBRIEF___ C++ high level API functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the C++ high level API functions of the CUDA runtime + * application programming interface. To use these functions, your + * application needs to be compiled with the \p nvcc compiler. + * + * \brief C++-style interface built on top of CUDA runtime API + */ + +/** + * \defgroup CUDART_DRIVER Interactions with the CUDA Driver API + * + * ___MANBRIEF___ interactions between CUDA Driver API and CUDA Runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the interactions between the CUDA Driver API and the CUDA Runtime API + * + * @{ + * + * \section CUDART_CUDA_primary Primary Contexts + * + * There exists a one to one relationship between CUDA devices in the CUDA Runtime + * API and ::CUcontext s in the CUDA Driver API within a process. The specific + * context which the CUDA Runtime API uses for a device is called the device's + * primary context. From the perspective of the CUDA Runtime API, a device and + * its primary context are synonymous. + * + * \section CUDART_CUDA_init Initialization and Tear-Down + * + * CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to + * to the calling host thread. + * + * The function ::cudaInitDevice() ensures that the primary context is initialized + * for the requested device but does not make it current to the calling thread. 
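+ *
+ * For illustration, an editorial sketch (not part of the original NVIDIA text, and assuming
+ * the ::cudaInitDevice() signature that takes a device ordinal followed by two flag words)
+ * contrasting it with ::cudaSetDevice(), described next:
+ * \code
+ cudaInitDevice(1, 0, 0);  // primary context of device 1 initialized; current device unchanged
+ cudaSetDevice(1);         // primary context initialized if needed and made current to this thread
+ * \endcode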
+ * + * The function ::cudaSetDevice() initializes the primary context for the + * specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent(). + * + * The CUDA Runtime API will automatically initialize the primary context for + * a device at the first CUDA Runtime API call which requires an active context. + * If no ::CUcontext is current to the calling thread when a CUDA Runtime API call + * which requires an active context is made, then the primary context for a device + * will be selected, made current to the calling thread, and initialized. + * + * The context which the CUDA Runtime API initializes will be initialized using + * the parameters specified by the CUDA Runtime API functions + * ::cudaSetDeviceFlags(), + * ::cudaD3D9SetDirect3DDevice(), + * ::cudaD3D10SetDirect3DDevice(), + * ::cudaD3D11SetDirect3DDevice(), + * ::cudaGLSetGLDevice(), and + * ::cudaVDPAUSetVDPAUDevice(). + * Note that these functions will fail with ::cudaErrorSetOnActiveProcess if they are + * called when the primary context for the specified device has already been initialized. + * (or if the current device has already been initialized, in the case of + * ::cudaSetDeviceFlags()). + * + * Primary contexts will remain active until they are explicitly deinitialized + * using ::cudaDeviceReset(). The function ::cudaDeviceReset() will deinitialize the + * primary context for the calling thread's current device immediately. The context + * will remain current to all of the threads that it was current to. The next CUDA + * Runtime API call on any thread which requires an active context will trigger the + * reinitialization of that device's primary context. + * + * Note that primary contexts are shared resources. It is recommended that + * the primary context not be reset except just before exit or to recover from an + * unspecified launch failure. + * + * \section CUDART_CUDA_context Context Interoperability + * + * Note that the use of multiple ::CUcontext s per device within a single process + * will substantially degrade performance and is strongly discouraged. Instead, + * it is highly recommended that the implicit one-to-one device-to-context mapping + * for the process provided by the CUDA Runtime API be used. + * + * If a non-primary ::CUcontext created by the CUDA Driver API is current to a + * thread then the CUDA Runtime API calls to that thread will operate on that + * ::CUcontext, with some exceptions listed below. Interoperability between data + * types is discussed in the following sections. + * + * The function ::cudaPointerGetAttributes() will return the error + * ::cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a + * non-primary context. The function ::cudaDeviceEnablePeerAccess() and the rest of + * the peer access API may not be called when a non-primary ::CUcontext is current. + * To use the pointer query and peer access APIs with a context created using the + * CUDA Driver API, it is necessary that the CUDA Driver API be used to access + * these features. + * + * All CUDA Runtime API state (e.g, global variables' addresses and values) travels + * with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one + * thread to another then all CUDA Runtime API state will move to that thread as well. + * + * Please note that attaching to legacy contexts (those with a version of 3010 as returned + * by ::cuCtxGetApiVersion()) is not possible. 
The CUDA Runtime will return + * ::cudaErrorIncompatibleDriverContext in such cases. + * + * \section CUDART_CUDA_stream Interactions between CUstream and cudaStream_t + * + * The types ::CUstream and ::cudaStream_t are identical and may be used interchangeably. + * + * \section CUDART_CUDA_event Interactions between CUevent and cudaEvent_t + * + * The types ::CUevent and ::cudaEvent_t are identical and may be used interchangeably. + * + * \section CUDART_CUDA_array Interactions between CUarray and cudaArray_t + * + * The types ::CUarray and struct ::cudaArray * represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, + * it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *. + * + * In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, + * it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray . + * + * \section CUDART_CUDA_graphicsResource Interactions between CUgraphicsResource and cudaGraphicsResource_t + * + * The types ::CUgraphicsResource and ::cudaGraphicsResource_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a + * ::cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource + * to a ::cudaGraphicsResource_t. + * + * In order to use a ::cudaGraphicsResource_t in a CUDA Driver API function which takes a + * ::CUgraphicsResource, it is necessary to explicitly cast the ::cudaGraphicsResource_t + * to a ::CUgraphicsResource. + * + * \section CUDART_CUDA_texture_objects Interactions between CUtexObject and cudaTextureObject_t + * + * The types ::CUtexObject and ::cudaTextureObject_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUtexObject in a CUDA Runtime API function which takes a ::cudaTextureObject_t, + * it is necessary to explicitly cast the ::CUtexObject to a ::cudaTextureObject_t. + * + * In order to use a ::cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, + * it is necessary to explicitly cast the ::cudaTextureObject_t to a ::CUtexObject. + * + * \section CUDART_CUDA_surface_objects Interactions between CUsurfObject and cudaSurfaceObject_t + * + * The types ::CUsurfObject and ::cudaSurfaceObject_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a ::cudaSurfaceObject_t, + * it is necessary to explicitly cast the ::CUsurfObject to a ::cudaSurfaceObject_t. + * + * In order to use a ::cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, + * it is necessary to explicitly cast the ::cudaSurfaceObject_t to a ::CUsurfObject. + * + * \section CUDART_CUDA_module Interactions between CUfunction and cudaFunction_t + * + * The types ::CUfunction and ::cudaFunction_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, + * it is necessary to explicitly cast the ::cudaFunction_t to a ::CUfunction. 
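Editorial note: the casting rules summarized in this section amount to very little code in practice. The sketch below is not part of the shipped header; it assumes device 0 and a build that links against both the CUDA runtime and the driver library.

// Editorial sketch, not part of the original header.
#include <cuda.h>
#include <cuda_runtime.h>

int main()
{
    // cudaSetDevice() makes device 0's primary context current, so the driver
    // calls below operate on the same context as the runtime calls.
    cudaSetDevice(0);

    // CUstream and cudaStream_t are identical types: no cast is needed.
    CUstream drvStream;
    cuStreamCreate(&drvStream, CU_STREAM_DEFAULT);
    cudaStreamSynchronize(drvStream);                  // used as a cudaStream_t

    // CUarray and struct cudaArray * name the same object, but an explicit
    // cast is required in each direction.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaArray_t rtArray = nullptr;
    cudaMallocArray(&rtArray, &desc, 256, 256);

    CUDA_ARRAY_DESCRIPTOR drvDesc;
    cuArrayGetDescriptor(&drvDesc, reinterpret_cast<CUarray>(rtArray));

    cudaFreeArray(rtArray);
    cuStreamDestroy(drvStream);
    return 0;
}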
+ * + */ + + /** + * \brief Get pointer to device entry function that matches entry function \p symbolPtr + * + * Returns in \p functionPtr the device entry function corresponding to the symbol \p symbolPtr. + * + * \param functionPtr - Returns the device entry function + * \param symbolPtr - Pointer to device entry function to search for + * + * \return + * ::cudaSuccess + * + */ +extern __host__ cudaError_t cudaGetFuncBySymbol(cudaFunction_t* functionPtr, const void* symbolPtr); + +/** + * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr + * + * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr. + * + * \param kernelPtr - Returns the device kernel + * \param entryFuncAddr - Address of device entry function to search kernel for + * + * \return + * ::cudaSuccess + * + * \sa + * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const T *entryFuncAddr) "cudaGetKernel (C++ API)" + */ +extern __host__ cudaError_t CUDARTAPI cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr); + +/** @} */ /* END CUDART_DRIVER */ + +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cudaMemcpy + #undef cudaMemcpyToSymbol + #undef cudaMemcpyFromSymbol + #undef cudaMemcpy2D + #undef cudaMemcpyToArray + #undef cudaMemcpy2DToArray + #undef cudaMemcpyFromArray + #undef cudaMemcpy2DFromArray + #undef cudaMemcpyArrayToArray + #undef cudaMemcpy2DArrayToArray + #undef cudaMemcpy3D + #undef cudaMemcpy3DPeer + #undef cudaMemset + #undef cudaMemset2D + #undef cudaMemset3D + #undef cudaMemcpyAsync + #undef cudaMemcpyToSymbolAsync + #undef cudaMemcpyFromSymbolAsync + #undef cudaMemcpy2DAsync + #undef cudaMemcpyToArrayAsync + #undef cudaMemcpy2DToArrayAsync + #undef cudaMemcpyFromArrayAsync + #undef cudaMemcpy2DFromArrayAsync + #undef cudaMemcpy3DAsync + #undef cudaMemcpy3DPeerAsync + #undef cudaMemsetAsync + #undef cudaMemset2DAsync + #undef cudaMemset3DAsync + #undef cudaStreamQuery + #undef cudaStreamGetFlags + #undef cudaStreamGetId + #undef cudaStreamGetPriority + #undef cudaEventRecord + #undef cudaEventRecordWithFlags + #undef cudaStreamWaitEvent + #undef cudaStreamAddCallback + #undef cudaStreamAttachMemAsync + #undef cudaStreamSynchronize + #undef cudaLaunchKernel + #undef cudaLaunchKernelExC + #undef cudaLaunchHostFunc + #undef cudaMemPrefetchAsync + #undef cudaLaunchCooperativeKernel + #undef cudaSignalExternalSemaphoresAsync + #undef cudaWaitExternalSemaphoresAsync + #undef cudaGraphInstantiateWithParams + #undef cudaGraphUpload + #undef cudaGraphLaunch + #undef cudaStreamBeginCapture + #undef cudaStreamEndCapture + #undef cudaStreamIsCapturing + #undef cudaStreamGetCaptureInfo + #undef cudaStreamGetCaptureInfo_v2 + #undef cudaStreamCopyAttributes + #undef cudaStreamGetAttribute + #undef cudaStreamSetAttribute + #undef cudaMallocAsync + #undef cudaFreeAsync + #undef cudaMallocFromPoolAsync + #undef cudaGetDriverEntryPoint + + #undef cudaGetDeviceProperties + + extern __host__ cudaError_t CUDARTAPI cudaMemcpy(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbol(const void *symbol, const void *src, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyHostToDevice)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbol(void *dst, const void *symbol, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToHost)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2D(void *dst, size_t dpitch, const void 
*src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromArray(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArray(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy3D(const struct cudaMemcpy3DParms *p); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeer(const struct cudaMemcpy3DPeerParms *p); + extern __host__ cudaError_t CUDARTAPI cudaMemset(void *devPtr, int value, size_t count); + extern __host__ cudaError_t CUDARTAPI cudaMemset2D(void *devPtr, size_t pitch, int value, size_t width, size_t height); + extern __host__ cudaError_t CUDARTAPI cudaMemset3D(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbolAsync(const void *symbol, const void *src, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbolAsync(void *dst, const void *symbol, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromArrayAsync(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArrayAsync(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI 
cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeerAsync(const struct cudaMemcpy3DPeerParms *p, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaStreamQuery(cudaStream_t stream); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetFlags(cudaStream_t hStream, unsigned int *flags); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetId(cudaStream_t hStream, unsigned long long *streamId); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetPriority(cudaStream_t hStream, int *priority); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream __dv(0), unsigned int flags __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags); + extern __host__ cudaError_t CUDARTAPI cudaStreamAddCallback(cudaStream_t stream, cudaStreamCallback_t callback, void *userData, unsigned int flags); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamAttachMemAsync(cudaStream_t stream, void *devPtr, size_t length, unsigned int flags); + extern __host__ cudaError_t CUDARTAPI cudaStreamSynchronize(cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args); + extern __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaLaunchHostFunc(cudaStream_t stream, cudaHostFn_t fn, void *userData); + extern __host__ cudaError_t CUDARTAPI cudaMemPrefetchAsync(const void *devPtr, size_t count, int dstDevice, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync_ptsz(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync_v2(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams_v1 
*paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync_ptsz(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync_v2(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiateWithParams(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, cudaGraphInstantiateParams *instantiateParams); + extern __host__ cudaError_t CUDARTAPI cudaGraphUpload(cudaGraphExec_t graphExec, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaStreamBeginCapture(cudaStream_t stream, enum cudaStreamCaptureMode mode); + extern __host__ cudaError_t CUDARTAPI cudaStreamEndCapture(cudaStream_t stream, cudaGraph_t *pGraph); + extern __host__ cudaError_t CUDARTAPI cudaStreamIsCapturing(cudaStream_t stream, enum cudaStreamCaptureStatus *pCaptureStatus); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo_ptsz(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo_v2(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out __dv(0), cudaGraph_t *graph_out __dv(0), const cudaGraphNode_t **dependencies_out __dv(0), size_t *numDependencies_out __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaStreamUpdateCaptureDependencies_ptsz(cudaStream_t stream, cudaGraphNode_t *dependencies, size_t numDependencies, unsigned int flags __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaStreamCopyAttributes(cudaStream_t dstStream, cudaStream_t srcStream); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetAttribute(cudaStream_t stream, cudaStreamAttrID attr, cudaStreamAttrValue *value); + extern __host__ cudaError_t CUDARTAPI cudaStreamSetAttribute(cudaStream_t stream, cudaStreamAttrID attr, const cudaStreamAttrValue *param); + + extern __host__ cudaError_t CUDARTAPI cudaMallocAsync(void **devPtr, size_t size, cudaStream_t hStream); + extern __host__ cudaError_t CUDARTAPI cudaFreeAsync(void *devPtr, cudaStream_t hStream); + extern __host__ cudaError_t CUDARTAPI cudaMallocFromPoolAsync(void **ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaGetDriverEntryPoint(const char *symbol, void **funcPtr, unsigned long long flags, enum cudaDriverEntryPointQueryResult *driverStatus); + + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceProperties(struct cudaDeviceProp *prop, int device); + +#elif defined(__CUDART_API_PER_THREAD_DEFAULT_STREAM) + // nvcc stubs reference the 'cudaLaunch'/'cudaLaunchKernel' identifier even if it was defined + // to 'cudaLaunch_ptsz'/'cudaLaunchKernel_ptsz'. Redirect through a static inline function. 
+ #undef cudaLaunchKernel + static __inline__ __host__ cudaError_t cudaLaunchKernel(const void *func, + dim3 gridDim, dim3 blockDim, + void **args, size_t sharedMem, + cudaStream_t stream) + { + return cudaLaunchKernel_ptsz(func, gridDim, blockDim, args, sharedMem, stream); + } + #define cudaLaunchKernel __CUDART_API_PTSZ(cudaLaunchKernel) + #undef cudaLaunchKernelExC + static __inline__ __host__ cudaError_t cudaLaunchKernelExC(const cudaLaunchConfig_t *config, + const void *func, + void **args) + { + return cudaLaunchKernelExC_ptsz(config, func, args); + } + #define cudaLaunchKernelExC __CUDART_API_PTSZ(cudaLaunchKernelExC) +#endif + +#if defined(__cplusplus) +} + +#endif /* __cplusplus */ + +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ + +#undef __dv +#undef __CUDA_DEPRECATED + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_API_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_API_H__ +#endif + +#endif /* !__CUDA_RUNTIME_API_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4a35c215668e98006c3eaa286deb70461eb1fa62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h @@ -0,0 +1,76 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_SURFACE_TYPES_H__) +#define __CUDA_SURFACE_TYPES_H__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__CUDA_SURFACE_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4f723db5c682a7b4b05491219df8993f0f6ebd59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h @@ -0,0 +1,76 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_TEXTURE_TYPES_H__) +#define __CUDA_TEXTURE_TYPES_H__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__CUDA_TEXTURE_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..2cf1ba357eb02ed82afc2f1812627a8a2d88c6f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h @@ -0,0 +1,201 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_VDPAU_INTEROP_H__) +#define __CUDA_VDPAU_INTEROP_H__ + +#include "cuda_runtime_api.h" + +#include + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** + * \addtogroup CUDART_VDPAU VDPAU Interoperability + * This section describes the VDPAU interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Gets the CUDA device associated with a VdpDevice. + * + * Returns the CUDA device associated with a VdpDevice, if applicable. + * + * \param device - Returns the device associated with vdpDevice, or -1 if + * the device associated with vdpDevice is not a compute device. + * \param vdpDevice - A VdpDevice handle + * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer + * + * \return + * ::cudaSuccess + * \notefnerr + * + * \sa + * ::cudaVDPAUSetVDPAUDevice, + * ::cuVDPAUGetDevice + */ +extern __host__ cudaError_t CUDARTAPI cudaVDPAUGetDevice(int *device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); + +/** + * \brief Sets a CUDA device to use VDPAU interoperability + * + * Records \p vdpDevice as the VdpDevice for VDPAU interoperability + * with the CUDA device \p device and sets \p device as the current + * device for the calling host thread. + * + * This function will immediately initialize the primary context on + * \p device if needed. + * + * If \p device has already been initialized then this call will fail + * with the error ::cudaErrorSetOnActiveProcess. In this case it is + * necessary to reset \p device using ::cudaDeviceReset() before + * VDPAU interoperability on \p device may be enabled. 
+ * + * \param device - Device to use for VDPAU interoperability + * \param vdpDevice - The VdpDevice to interoperate with + * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorSetOnActiveProcess + * \notefnerr + * + * \sa ::cudaGraphicsVDPAURegisterVideoSurface, + * ::cudaGraphicsVDPAURegisterOutputSurface, + * ::cudaDeviceReset + */ +extern __host__ cudaError_t CUDARTAPI cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); + +/** + * \brief Register a VdpVideoSurface object + * + * Registers the VdpVideoSurface specified by \p vdpSurface for access by CUDA. + * A handle to the registered object is returned as \p resource. + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA + * will not write to this resource. + * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * \param resource - Pointer to the returned object handle + * \param vdpSurface - VDPAU object to be registered + * \param flags - Map flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * + * \sa + * ::cudaVDPAUSetVDPAUDevice, + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cuGraphicsVDPAURegisterVideoSurface + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterVideoSurface(struct cudaGraphicsResource **resource, VdpVideoSurface vdpSurface, unsigned int flags); + +/** + * \brief Register a VdpOutputSurface object + * + * Registers the VdpOutputSurface specified by \p vdpSurface for access by CUDA. + * A handle to the registered object is returned as \p resource. + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA + * will not write to this resource. + * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. 
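Editorial note: as an illustration of the flag choices above, the following sketch (not part of the shipped header) registers a video surface for read-only access and maps it to a CUDA array. The VDPAU objects vdpDevice, vdpGetProcAddress and videoSurface are assumed to have been created elsewhere through the VDPAU API; only the CUDA side is shown.

// Editorial sketch, not part of the original header.
#include <cuda_runtime.h>
#include <cuda_vdpau_interop.h>

cudaError_t read_video_surface(int device,
                               VdpDevice vdpDevice,
                               VdpGetProcAddress *vdpGetProcAddress,
                               VdpVideoSurface videoSurface)
{
    // Bind the CUDA device to the VDPAU device for interoperability.
    cudaError_t err = cudaVDPAUSetVDPAUDevice(device, vdpDevice, vdpGetProcAddress);
    if (err != cudaSuccess) return err;

    // CUDA will only read from the surface, so pass the ReadOnly flag.
    cudaGraphicsResource_t resource = nullptr;
    err = cudaGraphicsVDPAURegisterVideoSurface(&resource, videoSurface,
                                                cudaGraphicsMapFlagsReadOnly);
    if (err != cudaSuccess) return err;

    // Map the resource and obtain the CUDA array backing sub-resource 0.
    cudaGraphicsMapResources(1, &resource, 0);
    cudaArray_t array = nullptr;
    cudaGraphicsSubResourceGetMappedArray(&array, resource, 0, 0);

    // ... read from 'array' here, e.g. with cudaMemcpy2DFromArray ...

    cudaGraphicsUnmapResources(1, &resource, 0);
    return cudaGraphicsUnregisterResource(resource);
}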
+ * + * \param resource - Pointer to the returned object handle + * \param vdpSurface - VDPAU object to be registered + * \param flags - Map flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * + * \sa + * ::cudaVDPAUSetVDPAUDevice, + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cuGraphicsVDPAURegisterOutputSurface + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterOutputSurface(struct cudaGraphicsResource **resource, VdpOutputSurface vdpSurface, unsigned int flags); + +/** @} */ /* END CUDART_VDPAU */ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* __CUDA_VDPAU_INTEROP_H__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..0f022bbe349eba2219a6b74f1ea315c1ce8551b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h @@ -0,0 +1,57 @@ +/* + * Copyright 2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef __CUDART_PLATFORM_H__ +#define __CUDART_PLATFORM_H__ + +#if ((defined(__linux__) || defined(__QNX__)) && (defined(__arm__) || defined(__aarch64__) || defined(__x86_64__))) +#define isEglSupported 1 +#endif + +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..611b715edfb82b6ee731419c30505676e1f07c52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h @@ -0,0 +1,217 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__DEVICE_ATOMIC_FUNCTIONS_H__) +#define __DEVICE_ATOMIC_FUNCTIONS_H__ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +# define __DEVICE_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__ +#else /* __CUDACC_RTC__ */ +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in PGI CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +extern "C" +{ +extern __device__ __device_builtin__ int __iAtomicAdd(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicAdd(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicExch(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicExch(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ float __fAtomicExch(float *address, float val); +extern __device__ __device_builtin__ int __iAtomicMin(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicMin(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicMax(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicMax(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ unsigned int __uAtomicInc(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ unsigned int __uAtomicDec(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicAnd(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicAnd(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicOr(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicOr(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicXor(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicXor(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicCAS(int *address, int compare, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicCAS(unsigned int *address, unsigned int compare, unsigned int val); +} +#endif /* __CUDA_ARCH__ || defined(_NVHPC_CUDA) */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int 
atomicExch(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val) __DEF_IF_HOST + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on compute_70 and above, and should be replaced with "#x"_sync()."\ + "To continue using "#x"(), specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options: -arch=compute_60 -code=sm_70." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." +#else +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." 
+#endif + +extern "C" +{ +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +extern __device__ __device_builtin__ unsigned long long int __ullAtomicAdd(unsigned long long int *address, unsigned long long int val); +extern __device__ __device_builtin__ unsigned long long int __ullAtomicExch(unsigned long long int *address, unsigned long long int val); +extern __device__ __device_builtin__ unsigned long long int __ullAtomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val); +#endif /* __CUDA_ARCH__ || _NVHPC_CUDA */ +extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) int __any(int cond); +extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) int __all(int cond); +} + + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) bool any(bool cond) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) bool all(bool cond) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __DEVICE_ATOMIC_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "device_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__DEVICE_ATOMIC_FUNCTIONS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..50e427c39b164e6e145c3930cd66cc03806175a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp @@ -0,0 +1,224 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DEVICE_ATOMIC_FUNCTIONS_HPP__) +#define __DEVICE_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val) +{ + return __iAtomicAdd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val) +{ + return __iAtomicAdd(address, (unsigned int)-(int)val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd(address, (unsigned int)-(int)val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val) +{ + return __iAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val) +{ + return __uAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val) +{ + return __fAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val) +{ + return __iAtomicMin(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val) +{ + return __uAtomicMin(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, 
int val) +{ + return __iAtomicMax(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val) +{ + return __uAtomicMax(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val) +{ + return __uAtomicInc(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val) +{ + return __uAtomicDec(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val) +{ + return __iAtomicAnd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val) +{ + return __iAtomicOr(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val) +{ + return __uAtomicOr(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val) +{ + return __iAtomicXor(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val) +{ + return __uAtomicXor(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val) +{ + return __iAtomicCAS(address, compare, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val) +{ + return __uAtomicCAS(address, compare, val); +} + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) +{ + return __ullAtomicAdd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val) +{ + return __ullAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val) +{ + return __ullAtomicCAS(address, compare, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ bool any(bool cond) +{ + return (bool)__any((int)cond); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ bool all(bool cond) +{ + return (bool)__all((int)cond); +} + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEVICE_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__DEVICE_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..82b25e59b40aeaf1e475ff3179e49640a44918b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
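+/*
+ * Illustrative note (a hedged sketch of user-side code, not part of this
+ * header): translation units should pull these declarations in through the
+ * public header named in the warning above, e.g.
+ *
+ *   #include <cuda_runtime.h>
+ */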
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__ +#endif + +#include "crt/device_double_functions.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__ +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0094cc9a0a57f53f47421a8ecc400fb84c26babe --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__ +#endif + +#include "crt/device_functions.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__ +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h new file mode 100644 index 0000000000000000000000000000000000000000..8f552db8faab7d21e90e06a1ea2184a5563d3bf2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h @@ -0,0 +1,118 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 
227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DEVICE_LAUNCH_PARAMETERS_H__) +#define __DEVICE_LAUNCH_PARAMETERS_H__ + +#include "vector_types.h" + +#if !defined(__STORAGE__) + +#if defined(__CUDACC_RTC__) +#define __STORAGE__ \ + extern const __device__ +#else /* !__CUDACC_RTC__ */ +#define __STORAGE__ \ + extern const +#endif /* __CUDACC_RTC__ */ + +#endif /* __STORAGE__ */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +uint3 __device_builtin__ __STORAGE__ threadIdx; +uint3 __device_builtin__ __STORAGE__ blockIdx; +dim3 __device_builtin__ __STORAGE__ blockDim; +dim3 __device_builtin__ __STORAGE__ gridDim; +int __device_builtin__ __STORAGE__ warpSize; + +#undef __STORAGE__ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#if !defined(__cudaGet_threadIdx) + +#define __cudaGet_threadIdx() \ + threadIdx + +#endif /* __cudaGet_threadIdx */ + +#if !defined(__cudaGet_blockIdx) + +#define __cudaGet_blockIdx() \ + blockIdx + +#endif /* __cudaGet_blockIdx */ + +#if !defined(__cudaGet_blockDim) + +#define __cudaGet_blockDim() \ + blockDim + +#endif /* __cudaGet_blockDim */ + +#if !defined(__cudaGet_gridDim) + +#define __cudaGet_gridDim() \ + gridDim + +#endif /* __cudaGet_gridDim */ + +#if !defined(__cudaGet_warpSize) + +#define __cudaGet_warpSize() \ + warpSize + +#endif /* __cudaGet_warpSize */ + +#endif /* !__DEVICE_LAUNCH_PARAMETERS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4b575a1014c6cdb9bf2f722c2a67e329186079e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h @@ -0,0 +1,81 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DEVICE_TYPES_H__) +#define __DEVICE_TYPES_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__ +#endif + +#ifndef __DOXYGEN_ONLY__ +#include "crt/host_defines.h" +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +enum __device_builtin__ cudaRoundMode +{ + cudaRoundNearest, + cudaRoundZero, + cudaRoundPosInf, + cudaRoundMinInf +}; + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__ +#endif + +#endif /* !__DEVICE_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..94767974220594550d496cad4d14c45349b27737 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h @@ -0,0 +1,145 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DRIVER_FUNCTIONS_H__) +#define __DRIVER_FUNCTIONS_H__ + +#include "builtin_types.h" +#include "crt/host_defines.h" +#include "driver_types.h" + +/** + * \addtogroup CUDART_MEMORY + * + * @{ + */ + +/** + * \brief Returns a cudaPitchedPtr based on input parameters + * + * Returns a ::cudaPitchedPtr based on the specified input parameters \p d, + * \p p, \p xsz, and \p ysz. + * + * \param d - Pointer to allocated memory + * \param p - Pitch of allocated memory in bytes + * \param xsz - Logical width of allocation in elements + * \param ysz - Logical height of allocation in elements + * + * \return + * ::cudaPitchedPtr specified by \p d, \p p, \p xsz, and \p ysz + * + * \sa make_cudaExtent, make_cudaPos + */ +static __inline__ __host__ struct cudaPitchedPtr make_cudaPitchedPtr(void *d, size_t p, size_t xsz, size_t ysz) +{ + struct cudaPitchedPtr s; + + s.ptr = d; + s.pitch = p; + s.xsize = xsz; + s.ysize = ysz; + + return s; +} + +/** + * \brief Returns a cudaPos based on input parameters + * + * Returns a ::cudaPos based on the specified input parameters \p x, + * \p y, and \p z. + * + * \param x - X position + * \param y - Y position + * \param z - Z position + * + * \return + * ::cudaPos specified by \p x, \p y, and \p z + * + * \sa make_cudaExtent, make_cudaPitchedPtr + */ +static __inline__ __host__ struct cudaPos make_cudaPos(size_t x, size_t y, size_t z) +{ + struct cudaPos p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + +/** + * \brief Returns a cudaExtent based on input parameters + * + * Returns a ::cudaExtent based on the specified input parameters \p w, + * \p h, and \p d. 
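+ *
+ * A minimal usage sketch (illustrative only; the sizes and the names \c ext
+ * and \c devPitchedPtr are assumptions, not upstream text): an extent
+ * describing a 64 x 32 x 8 volume of floats in linear memory can be passed
+ * to ::cudaMalloc3D as
+ * \code
+ * struct cudaExtent ext = make_cudaExtent(64 * sizeof(float), 32, 8);
+ * struct cudaPitchedPtr devPitchedPtr;
+ * cudaError_t err = cudaMalloc3D(&devPitchedPtr, ext);
+ * \endcode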
+ * + * \param w - Width in elements when referring to array memory, in bytes when referring to linear memory + * \param h - Height in elements + * \param d - Depth in elements + * + * \return + * ::cudaExtent specified by \p w, \p h, and \p d + * + * \sa make_cudaPitchedPtr, make_cudaPos + */ +static __inline__ __host__ struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d) +{ + struct cudaExtent e; + + e.width = w; + e.height = h; + e.depth = d; + + return e; +} + +/** @} */ /* END CUDART_MEMORY */ + +#endif /* !__DRIVER_FUNCTIONS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h new file mode 100644 index 0000000000000000000000000000000000000000..988702c0e7c6b796c286ef3237c8af3341bad846 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h @@ -0,0 +1,3162 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__DRIVER_TYPES_H__) +#define __DRIVER_TYPES_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DRIVER_TYPES_H__ +#endif + +#ifndef __DOXYGEN_ONLY__ +#include "crt/host_defines.h" +#endif +#include "vector_types.h" + + + +/** + * \defgroup CUDART_TYPES Data types used by CUDA Runtime + * \ingroup CUDART + * + * @{ + */ + +/******************************************************************************* +* * +* TYPE DEFINITIONS USED BY RUNTIME API * +* * +*******************************************************************************/ + +#if !defined(__CUDA_INTERNAL_COMPILATION__) + +#if !defined(__CUDACC_RTC__) +#include +#include +#endif /* !defined(__CUDACC_RTC__) */ + +#define cudaHostAllocDefault 0x00 /**< Default page-locked allocation flag */ +#define cudaHostAllocPortable 0x01 /**< Pinned memory accessible by all CUDA contexts */ +#define cudaHostAllocMapped 0x02 /**< Map allocation into device space */ +#define cudaHostAllocWriteCombined 0x04 /**< Write-combined memory */ + +#define cudaHostRegisterDefault 0x00 /**< Default host memory registration flag */ +#define cudaHostRegisterPortable 0x01 /**< Pinned memory accessible by all CUDA contexts */ +#define cudaHostRegisterMapped 0x02 /**< Map registered memory into device space */ +#define cudaHostRegisterIoMemory 0x04 /**< Memory-mapped I/O space */ +#define cudaHostRegisterReadOnly 0x08 /**< Memory-mapped read-only */ + +#define cudaPeerAccessDefault 0x00 /**< Default peer addressing enable flag */ + +#define cudaStreamDefault 0x00 /**< Default stream flag */ +#define cudaStreamNonBlocking 0x01 /**< Stream does not synchronize with stream 0 (the NULL stream) */ + + /** + * Legacy stream handle + * + * Stream handle that can be passed as a cudaStream_t to use an implicit stream + * with legacy synchronization behavior. + * + * See details of the \link_sync_behavior + */ +#define cudaStreamLegacy ((cudaStream_t)0x1) + +/** + * Per-thread stream handle + * + * Stream handle that can be passed as a cudaStream_t to use an implicit stream + * with per-thread synchronization behavior. + * + * See details of the \link_sync_behavior + */ +#define cudaStreamPerThread ((cudaStream_t)0x2) + +#define cudaEventDefault 0x00 /**< Default event flag */ +#define cudaEventBlockingSync 0x01 /**< Event uses blocking synchronization */ +#define cudaEventDisableTiming 0x02 /**< Event will not record timing data */ +#define cudaEventInterprocess 0x04 /**< Event is suitable for interprocess use. 
cudaEventDisableTiming must be set */ + +#define cudaEventRecordDefault 0x00 /**< Default event record flag */ +#define cudaEventRecordExternal 0x01 /**< Event is captured in the graph as an external event node when performing stream capture */ + +#define cudaEventWaitDefault 0x00 /**< Default event wait flag */ +#define cudaEventWaitExternal 0x01 /**< Event is captured in the graph as an external event node when performing stream capture */ + +#define cudaDeviceScheduleAuto 0x00 /**< Device flag - Automatic scheduling */ +#define cudaDeviceScheduleSpin 0x01 /**< Device flag - Spin default scheduling */ +#define cudaDeviceScheduleYield 0x02 /**< Device flag - Yield default scheduling */ +#define cudaDeviceScheduleBlockingSync 0x04 /**< Device flag - Use blocking synchronization */ +#define cudaDeviceBlockingSync 0x04 /**< Device flag - Use blocking synchronization + * \deprecated This flag was deprecated as of CUDA 4.0 and + * replaced with ::cudaDeviceScheduleBlockingSync. */ +#define cudaDeviceScheduleMask 0x07 /**< Device schedule flags mask */ +#define cudaDeviceMapHost 0x08 /**< Device flag - Support mapped pinned allocations */ +#define cudaDeviceLmemResizeToMax 0x10 /**< Device flag - Keep local memory allocation after launch */ +#define cudaDeviceSyncMemops 0x80 /**< Device flag - Use synchronous behavior for cudaMemcpy/cudaMemset */ +#define cudaDeviceMask 0xff /**< Device flags mask */ + +#define cudaArrayDefault 0x00 /**< Default CUDA array allocation flag */ +#define cudaArrayLayered 0x01 /**< Must be set in cudaMalloc3DArray to create a layered CUDA array */ +#define cudaArraySurfaceLoadStore 0x02 /**< Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array */ +#define cudaArrayCubemap 0x04 /**< Must be set in cudaMalloc3DArray to create a cubemap CUDA array */ +#define cudaArrayTextureGather 0x08 /**< Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array */ +#define cudaArrayColorAttachment 0x20 /**< Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped array is used as a color target in a graphics API */ +#define cudaArraySparse 0x40 /**< Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA mipmapped array */ +#define cudaArrayDeferredMapping 0x80 /**< Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a deferred mapping CUDA array or CUDA mipmapped array */ + +#define cudaIpcMemLazyEnablePeerAccess 0x01 /**< Automatically enable peer access between remote devices as needed */ + +#define cudaMemAttachGlobal 0x01 /**< Memory can be accessed by any stream on any device*/ +#define cudaMemAttachHost 0x02 /**< Memory cannot be accessed by any stream on any device */ +#define cudaMemAttachSingle 0x04 /**< Memory can only be accessed by a single stream on the associated device */ + +#define cudaOccupancyDefault 0x00 /**< Default behavior */ +#define cudaOccupancyDisableCachingOverride 0x01 /**< Assume global caching is enabled and cannot be automatically turned off */ + +#define cudaCpuDeviceId ((int)-1) /**< Device id that represents the CPU */ +#define cudaInvalidDeviceId ((int)-2) /**< Device id that represents an invalid device */ +#define cudaInitDeviceFlagsAreValid 0x01 /**< Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice call */ +/** + * If set, each kernel launched as part of ::cudaLaunchCooperativeKernelMultiDevice 
only + * waits for prior work in the stream corresponding to that GPU to complete before the + * kernel begins execution. + */ +#define cudaCooperativeLaunchMultiDeviceNoPreSync 0x01 + +/** + * If set, any subsequent work pushed in a stream that participated in a call to + * ::cudaLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on + * the GPU corresponding to that stream to complete before it begins execution. + */ +#define cudaCooperativeLaunchMultiDeviceNoPostSync 0x02 + +#endif /* !__CUDA_INTERNAL_COMPILATION__ */ + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +/** + * CUDA error types + */ +enum __device_builtin__ cudaError +{ + /** + * The API call returned with no errors. In the case of query calls, this + * also means that the operation being queried is complete (see + * ::cudaEventQuery() and ::cudaStreamQuery()). + */ + cudaSuccess = 0, + + /** + * This indicates that one or more of the parameters passed to the API call + * is not within an acceptable range of values. + */ + cudaErrorInvalidValue = 1, + + /** + * The API call failed because it was unable to allocate enough memory to + * perform the requested operation. + */ + cudaErrorMemoryAllocation = 2, + + /** + * The API call failed because the CUDA driver and runtime could not be + * initialized. + */ + cudaErrorInitializationError = 3, + + /** + * This indicates that a CUDA Runtime API call cannot be executed because + * it is being called during process shut down, at a point in time after + * CUDA driver has been unloaded. + */ + cudaErrorCudartUnloading = 4, + + /** + * This indicates profiler is not initialized for this run. This can + * happen when the application is running with external profiling tools + * like visual profiler. + */ + cudaErrorProfilerDisabled = 5, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to attempt to enable/disable the profiling via ::cudaProfilerStart or + * ::cudaProfilerStop without initialization. + */ + cudaErrorProfilerNotInitialized = 6, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to call cudaProfilerStart() when profiling is already enabled. + */ + cudaErrorProfilerAlreadyStarted = 7, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to call cudaProfilerStop() when profiling is already disabled. + */ + cudaErrorProfilerAlreadyStopped = 8, + + /** + * This indicates that a kernel launch is requesting resources that can + * never be satisfied by the current device. Requesting more shared memory + * per block than the device supports will trigger this error, as will + * requesting too many threads or blocks. See ::cudaDeviceProp for more + * device limitations. + */ + cudaErrorInvalidConfiguration = 9, + + /** + * This indicates that one or more of the pitch-related parameters passed + * to the API call is not within the acceptable range for pitch. 
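+     *
+     * A hedged illustration (the variables \c width, \c height and \c h_src
+     * below are assumed caller-side values, not upstream text): the pitch
+     * passed to the 2D copy routines should be the one reported by the
+     * allocator, e.g.
+     * \code
+     * float *d_img; size_t pitch;
+     * cudaMallocPitch((void **)&d_img, &pitch, width * sizeof(float), height);
+     * cudaMemcpy2D(d_img, pitch, h_src, width * sizeof(float),
+     *              width * sizeof(float), height, cudaMemcpyHostToDevice);
+     * \endcode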
+ */ + cudaErrorInvalidPitchValue = 12, + + /** + * This indicates that the symbol name/identifier passed to the API call + * is not a valid name or identifier. + */ + cudaErrorInvalidSymbol = 13, + + /** + * This indicates that at least one host pointer passed to the API call is + * not a valid host pointer. + * \deprecated + * This error return is deprecated as of CUDA 10.1. + */ + cudaErrorInvalidHostPointer = 16, + + /** + * This indicates that at least one device pointer passed to the API call is + * not a valid device pointer. + * \deprecated + * This error return is deprecated as of CUDA 10.1. + */ + cudaErrorInvalidDevicePointer = 17, + + /** + * This indicates that the texture passed to the API call is not a valid + * texture. + */ + cudaErrorInvalidTexture = 18, + + /** + * This indicates that the texture binding is not valid. This occurs if you + * call ::cudaGetTextureAlignmentOffset() with an unbound texture. + */ + cudaErrorInvalidTextureBinding = 19, + + /** + * This indicates that the channel descriptor passed to the API call is not + * valid. This occurs if the format is not one of the formats specified by + * ::cudaChannelFormatKind, or if one of the dimensions is invalid. + */ + cudaErrorInvalidChannelDescriptor = 20, + + /** + * This indicates that the direction of the memcpy passed to the API call is + * not one of the types specified by ::cudaMemcpyKind. + */ + cudaErrorInvalidMemcpyDirection = 21, + + /** + * This indicated that the user has taken the address of a constant variable, + * which was forbidden up until the CUDA 3.1 release. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Variables in constant + * memory may now have their address taken by the runtime via + * ::cudaGetSymbolAddress(). + */ + cudaErrorAddressOfConstant = 22, + + /** + * This indicated that a texture fetch was not able to be performed. + * This was previously used for device emulation of texture operations. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Device emulation mode was + * removed with the CUDA 3.1 release. + */ + cudaErrorTextureFetchFailed = 23, + + /** + * This indicated that a texture was not bound for access. + * This was previously used for device emulation of texture operations. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Device emulation mode was + * removed with the CUDA 3.1 release. + */ + cudaErrorTextureNotBound = 24, + + /** + * This indicated that a synchronization operation had failed. + * This was previously used for some device emulation functions. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Device emulation mode was + * removed with the CUDA 3.1 release. + */ + cudaErrorSynchronizationError = 25, + + /** + * This indicates that a non-float texture was being accessed with linear + * filtering. This is not supported by CUDA. + */ + cudaErrorInvalidFilterSetting = 26, + + /** + * This indicates that an attempt was made to read a non-float texture as a + * normalized float. This is not supported by CUDA. + */ + cudaErrorInvalidNormSetting = 27, + + /** + * Mixing of device and device emulation code was not allowed. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Device emulation mode was + * removed with the CUDA 3.1 release. + */ + cudaErrorMixedDeviceExecution = 28, + + /** + * This indicates that the API call is not yet implemented. Production + * releases of CUDA will never return this error. 
+ * \deprecated + * This error return is deprecated as of CUDA 4.1. + */ + cudaErrorNotYetImplemented = 31, + + /** + * This indicated that an emulated device pointer exceeded the 32-bit address + * range. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Device emulation mode was + * removed with the CUDA 3.1 release. + */ + cudaErrorMemoryValueTooLarge = 32, + + /** + * This indicates that the CUDA driver that the application has loaded is a + * stub library. Applications that run with the stub rather than a real + * driver loaded will result in CUDA API returning this error. + */ + cudaErrorStubLibrary = 34, + + /** + * This indicates that the installed NVIDIA CUDA driver is older than the + * CUDA runtime library. This is not a supported configuration. Users should + * install an updated NVIDIA display driver to allow the application to run. + */ + cudaErrorInsufficientDriver = 35, + + /** + * This indicates that the API call requires a newer CUDA driver than the one + * currently installed. Users should install an updated NVIDIA CUDA driver + * to allow the API call to succeed. + */ + cudaErrorCallRequiresNewerDriver = 36, + + /** + * This indicates that the surface passed to the API call is not a valid + * surface. + */ + cudaErrorInvalidSurface = 37, + + /** + * This indicates that multiple global or constant variables (across separate + * CUDA source files in the application) share the same string name. + */ + cudaErrorDuplicateVariableName = 43, + + /** + * This indicates that multiple textures (across separate CUDA source + * files in the application) share the same string name. + */ + cudaErrorDuplicateTextureName = 44, + + /** + * This indicates that multiple surfaces (across separate CUDA source + * files in the application) share the same string name. + */ + cudaErrorDuplicateSurfaceName = 45, + + /** + * This indicates that all CUDA devices are busy or unavailable at the current + * time. Devices are often busy/unavailable due to use of + * ::cudaComputeModeProhibited, ::cudaComputeModeExclusiveProcess, or when long + * running CUDA kernels have filled up the GPU and are blocking new work + * from starting. They can also be unavailable due to memory constraints + * on a device that already has active CUDA work being performed. + */ + cudaErrorDevicesUnavailable = 46, + + /** + * This indicates that the current context is not compatible with this + * the CUDA Runtime. This can only occur if you are using CUDA + * Runtime/Driver interoperability and have created an existing Driver + * context using the driver API. The Driver context may be incompatible + * either because the Driver context was created using an older version + * of the API, because the Runtime API call expects a primary driver + * context and the Driver context is not primary, or because the Driver + * context has been destroyed. Please see \ref CUDART_DRIVER "Interactions + * with the CUDA Driver API" for more information. + */ + cudaErrorIncompatibleDriverContext = 49, + + /** + * The device function being invoked (usually via ::cudaLaunchKernel()) was not + * previously configured via the ::cudaConfigureCall() function. + */ + cudaErrorMissingConfiguration = 52, + + /** + * This indicated that a previous kernel launch failed. This was previously + * used for device emulation of kernel launches. + * \deprecated + * This error return is deprecated as of CUDA 3.1. Device emulation mode was + * removed with the CUDA 3.1 release. 
+ */ + cudaErrorPriorLaunchFailure = 53, + + /** + * This error indicates that a device runtime grid launch did not occur + * because the depth of the child grid would exceed the maximum supported + * number of nested grid launches. + */ + cudaErrorLaunchMaxDepthExceeded = 65, + + /** + * This error indicates that a grid launch did not occur because the kernel + * uses file-scoped textures which are unsupported by the device runtime. + * Kernels launched via the device runtime only support textures created with + * the Texture Object API's. + */ + cudaErrorLaunchFileScopedTex = 66, + + /** + * This error indicates that a grid launch did not occur because the kernel + * uses file-scoped surfaces which are unsupported by the device runtime. + * Kernels launched via the device runtime only support surfaces created with + * the Surface Object API's. + */ + cudaErrorLaunchFileScopedSurf = 67, + + /** + * This error indicates that a call to ::cudaDeviceSynchronize made from + * the device runtime failed because the call was made at grid depth greater + * than than either the default (2 levels of grids) or user specified device + * limit ::cudaLimitDevRuntimeSyncDepth. To be able to synchronize on + * launched grids at a greater depth successfully, the maximum nested + * depth at which ::cudaDeviceSynchronize will be called must be specified + * with the ::cudaLimitDevRuntimeSyncDepth limit to the ::cudaDeviceSetLimit + * api before the host-side launch of a kernel using the device runtime. + * Keep in mind that additional levels of sync depth require the runtime + * to reserve large amounts of device memory that cannot be used for + * user allocations. Note that ::cudaDeviceSynchronize made from device + * runtime is only supported on devices of compute capability < 9.0. + */ + cudaErrorSyncDepthExceeded = 68, + + /** + * This error indicates that a device runtime grid launch failed because + * the launch would exceed the limit ::cudaLimitDevRuntimePendingLaunchCount. + * For this launch to proceed successfully, ::cudaDeviceSetLimit must be + * called to set the ::cudaLimitDevRuntimePendingLaunchCount to be higher + * than the upper bound of outstanding launches that can be issued to the + * device runtime. Keep in mind that raising the limit of pending device + * runtime launches will require the runtime to reserve device memory that + * cannot be used for user allocations. + */ + cudaErrorLaunchPendingCountExceeded = 69, + + /** + * The requested device function does not exist or is not compiled for the + * proper device architecture. + */ + cudaErrorInvalidDeviceFunction = 98, + + /** + * This indicates that no CUDA-capable devices were detected by the installed + * CUDA driver. + */ + cudaErrorNoDevice = 100, + + /** + * This indicates that the device ordinal supplied by the user does not + * correspond to a valid CUDA device or that the action requested is + * invalid for the specified device. + */ + cudaErrorInvalidDevice = 101, + + /** + * This indicates that the device doesn't have a valid Grid License. + */ + cudaErrorDeviceNotLicensed = 102, + + /** + * By default, the CUDA runtime may perform a minimal set of self-tests, + * as well as CUDA driver tests, to establish the validity of both. + * Introduced in CUDA 11.2, this error return indicates that at least one + * of these tests has failed and the validity of either the runtime + * or the driver could not be established. 
+ */ + cudaErrorSoftwareValidityNotEstablished = 103, + + /** + * This indicates an internal startup failure in the CUDA runtime. + */ + cudaErrorStartupFailure = 127, + + /** + * This indicates that the device kernel image is invalid. + */ + cudaErrorInvalidKernelImage = 200, + + /** + * This most frequently indicates that there is no context bound to the + * current thread. This can also be returned if the context passed to an + * API call is not a valid handle (such as a context that has had + * ::cuCtxDestroy() invoked on it). This can also be returned if a user + * mixes different API versions (i.e. 3010 context with 3020 API calls). + * See ::cuCtxGetApiVersion() for more details. + */ + cudaErrorDeviceUninitialized = 201, + + /** + * This indicates that the buffer object could not be mapped. + */ + cudaErrorMapBufferObjectFailed = 205, + + /** + * This indicates that the buffer object could not be unmapped. + */ + cudaErrorUnmapBufferObjectFailed = 206, + + /** + * This indicates that the specified array is currently mapped and thus + * cannot be destroyed. + */ + cudaErrorArrayIsMapped = 207, + + /** + * This indicates that the resource is already mapped. + */ + cudaErrorAlreadyMapped = 208, + + /** + * This indicates that there is no kernel image available that is suitable + * for the device. This can occur when a user specifies code generation + * options for a particular CUDA source file that do not include the + * corresponding device configuration. + */ + cudaErrorNoKernelImageForDevice = 209, + + /** + * This indicates that a resource has already been acquired. + */ + cudaErrorAlreadyAcquired = 210, + + /** + * This indicates that a resource is not mapped. + */ + cudaErrorNotMapped = 211, + + /** + * This indicates that a mapped resource is not available for access as an + * array. + */ + cudaErrorNotMappedAsArray = 212, + + /** + * This indicates that a mapped resource is not available for access as a + * pointer. + */ + cudaErrorNotMappedAsPointer = 213, + + /** + * This indicates that an uncorrectable ECC error was detected during + * execution. + */ + cudaErrorECCUncorrectable = 214, + + /** + * This indicates that the ::cudaLimit passed to the API call is not + * supported by the active device. + */ + cudaErrorUnsupportedLimit = 215, + + /** + * This indicates that a call tried to access an exclusive-thread device that + * is already in use by a different thread. + */ + cudaErrorDeviceAlreadyInUse = 216, + + /** + * This error indicates that P2P access is not supported across the given + * devices. + */ + cudaErrorPeerAccessUnsupported = 217, + + /** + * A PTX compilation failed. The runtime may fall back to compiling PTX if + * an application does not contain a suitable binary for the current device. + */ + cudaErrorInvalidPtx = 218, + + /** + * This indicates an error with the OpenGL or DirectX context. + */ + cudaErrorInvalidGraphicsContext = 219, + + /** + * This indicates that an uncorrectable NVLink error was detected during the + * execution. + */ + cudaErrorNvlinkUncorrectable = 220, + + /** + * This indicates that the PTX JIT compiler library was not found. The JIT Compiler + * library is used for PTX compilation. The runtime may fall back to compiling PTX + * if an application does not contain a suitable binary for the current device. + */ + cudaErrorJitCompilerNotFound = 221, + + /** + * This indicates that the provided PTX was compiled with an unsupported toolchain. 
+ * The most common reason for this, is the PTX was generated by a compiler newer + * than what is supported by the CUDA driver and PTX JIT compiler. + */ + cudaErrorUnsupportedPtxVersion = 222, + + /** + * This indicates that the JIT compilation was disabled. The JIT compilation compiles + * PTX. The runtime may fall back to compiling PTX if an application does not contain + * a suitable binary for the current device. + */ + cudaErrorJitCompilationDisabled = 223, + + /** + * This indicates that the provided execution affinity is not supported by the device. + */ + cudaErrorUnsupportedExecAffinity = 224, + + /** + * This indicates that the code to be compiled by the PTX JIT contains + * unsupported call to cudaDeviceSynchronize. + */ + cudaErrorUnsupportedDevSideSync = 225, + + /** + * This indicates that the device kernel source is invalid. + */ + cudaErrorInvalidSource = 300, + + /** + * This indicates that the file specified was not found. + */ + cudaErrorFileNotFound = 301, + + /** + * This indicates that a link to a shared object failed to resolve. + */ + cudaErrorSharedObjectSymbolNotFound = 302, + + /** + * This indicates that initialization of a shared object failed. + */ + cudaErrorSharedObjectInitFailed = 303, + + /** + * This error indicates that an OS call failed. + */ + cudaErrorOperatingSystem = 304, + + /** + * This indicates that a resource handle passed to the API call was not + * valid. Resource handles are opaque types like ::cudaStream_t and + * ::cudaEvent_t. + */ + cudaErrorInvalidResourceHandle = 400, + + /** + * This indicates that a resource required by the API call is not in a + * valid state to perform the requested operation. + */ + cudaErrorIllegalState = 401, + + /** + * This indicates that a named symbol was not found. Examples of symbols + * are global/constant variable names, driver function names, texture names, + * and surface names. + */ + cudaErrorSymbolNotFound = 500, + + /** + * This indicates that asynchronous operations issued previously have not + * completed yet. This result is not actually an error, but must be indicated + * differently than ::cudaSuccess (which indicates completion). Calls that + * may return this value include ::cudaEventQuery() and ::cudaStreamQuery(). + */ + cudaErrorNotReady = 600, + + /** + * The device encountered a load or store instruction on an invalid memory address. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + cudaErrorIllegalAddress = 700, + + /** + * This indicates that a launch did not occur because it did not have + * appropriate resources. Although this error is similar to + * ::cudaErrorInvalidConfiguration, this error usually indicates that the + * user has attempted to pass too many arguments to the device kernel, or the + * kernel launch specifies too many threads for the kernel's register count. + */ + cudaErrorLaunchOutOfResources = 701, + + /** + * This indicates that the device kernel took too long to execute. This can + * only occur if timeouts are enabled - see the device property + * \ref ::cudaDeviceProp::kernelExecTimeoutEnabled "kernelExecTimeoutEnabled" + * for more information. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. 
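+     *
+     * A minimal sketch (assuming device 0; illustrative only, not part of the
+     * upstream text) for checking whether the watchdog timeout applies to a
+     * device:
+     * \code
+     * cudaDeviceProp prop;
+     * cudaGetDeviceProperties(&prop, 0);
+     * if (prop.kernelExecTimeoutEnabled) {
+     *     // long-running kernels on this device may be terminated
+     * }
+     * \endcode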
+ */ + cudaErrorLaunchTimeout = 702, + + /** + * This error indicates a kernel launch that uses an incompatible texturing + * mode. + */ + cudaErrorLaunchIncompatibleTexturing = 703, + + /** + * This error indicates that a call to ::cudaDeviceEnablePeerAccess() is + * trying to re-enable peer addressing on from a context which has already + * had peer addressing enabled. + */ + cudaErrorPeerAccessAlreadyEnabled = 704, + + /** + * This error indicates that ::cudaDeviceDisablePeerAccess() is trying to + * disable peer addressing which has not been enabled yet via + * ::cudaDeviceEnablePeerAccess(). + */ + cudaErrorPeerAccessNotEnabled = 705, + + /** + * This indicates that the user has called ::cudaSetValidDevices(), + * ::cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), + * ::cudaD3D10SetDirect3DDevice, ::cudaD3D11SetDirect3DDevice(), or + * ::cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by + * calling non-device management operations (allocating memory and + * launching kernels are examples of non-device management operations). + * This error can also be returned if using runtime/driver + * interoperability and there is an existing ::CUcontext active on the + * host thread. + */ + cudaErrorSetOnActiveProcess = 708, + + /** + * This error indicates that the context current to the calling thread + * has been destroyed using ::cuCtxDestroy, or is a primary context which + * has not yet been initialized. + */ + cudaErrorContextIsDestroyed = 709, + + /** + * An assert triggered in device code during kernel execution. The device + * cannot be used again. All existing allocations are invalid. To continue + * using CUDA, the process must be terminated and relaunched. + */ + cudaErrorAssert = 710, + + /** + * This error indicates that the hardware resources required to enable + * peer access have been exhausted for one or more of the devices + * passed to ::cudaEnablePeerAccess(). + */ + cudaErrorTooManyPeers = 711, + + /** + * This error indicates that the memory range passed to ::cudaHostRegister() + * has already been registered. + */ + cudaErrorHostMemoryAlreadyRegistered = 712, + + /** + * This error indicates that the pointer passed to ::cudaHostUnregister() + * does not correspond to any currently registered memory region. + */ + cudaErrorHostMemoryNotRegistered = 713, + + /** + * Device encountered an error in the call stack during kernel execution, + * possibly due to stack corruption or exceeding the stack size limit. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + cudaErrorHardwareStackError = 714, + + /** + * The device encountered an illegal instruction during kernel execution + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + cudaErrorIllegalInstruction = 715, + + /** + * The device encountered a load or store instruction + * on a memory address which is not aligned. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. 
+ */ + cudaErrorMisalignedAddress = 716, + + /** + * While executing a kernel, the device encountered an instruction + * which can only operate on memory locations in certain address spaces + * (global, shared, or local), but was supplied a memory address not + * belonging to an allowed address space. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + cudaErrorInvalidAddressSpace = 717, + + /** + * The device encountered an invalid program counter. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + cudaErrorInvalidPc = 718, + + /** + * An exception occurred on the device while executing a kernel. Common + * causes include dereferencing an invalid device pointer and accessing + * out of bounds shared memory. Less common cases can be system specific - more + * information about these cases can be found in the system specific user guide. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + cudaErrorLaunchFailure = 719, + + /** + * This error indicates that the number of blocks launched per grid for a kernel that was + * launched via either ::cudaLaunchCooperativeKernel or ::cudaLaunchCooperativeKernelMultiDevice + * exceeds the maximum number of blocks as allowed by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * or ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors + * as specified by the device attribute ::cudaDevAttrMultiProcessorCount. + */ + cudaErrorCooperativeLaunchTooLarge = 720, + + /** + * This error indicates the attempted operation is not permitted. + */ + cudaErrorNotPermitted = 800, + + /** + * This error indicates the attempted operation is not supported + * on the current system or device. + */ + cudaErrorNotSupported = 801, + + /** + * This error indicates that the system is not yet ready to start any CUDA + * work. To continue using CUDA, verify the system configuration is in a + * valid state and all required driver daemons are actively running. + * More information about this error can be found in the system specific + * user guide. + */ + cudaErrorSystemNotReady = 802, + + /** + * This error indicates that there is a mismatch between the versions of + * the display driver and the CUDA driver. Refer to the compatibility documentation + * for supported versions. + */ + cudaErrorSystemDriverMismatch = 803, + + /** + * This error indicates that the system was upgraded to run with forward compatibility + * but the visible hardware detected by CUDA does not support this configuration. + * Refer to the compatibility documentation for the supported hardware matrix or ensure + * that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES + * environment variable. + */ + cudaErrorCompatNotSupportedOnDevice = 804, + + /** + * This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + */ + cudaErrorMpsConnectionFailed = 805, + + /** + * This error indicates that the remote procedural call between the MPS server and the MPS client failed. 
+ */
+ cudaErrorMpsRpcFailure = 806,
+
+ /**
+ * This error indicates that the MPS server is not ready to accept new MPS client requests.
+ * This error can be returned when the MPS server is in the process of recovering from a fatal failure.
+ */
+ cudaErrorMpsServerNotReady = 807,
+
+ /**
+ * This error indicates that the hardware resources required to create MPS client have been exhausted.
+ */
+ cudaErrorMpsMaxClientsReached = 808,
+
+ /**
+ * This error indicates that the hardware resources required for device connections have been exhausted.
+ */
+ cudaErrorMpsMaxConnectionsReached = 809,
+
+ /**
+ * This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.
+ */
+ cudaErrorMpsClientTerminated = 810,
+
+ /**
+ * This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.
+ */
+ cudaErrorCdpNotSupported = 811,
+
+ /**
+ * This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.
+ */
+ cudaErrorCdpVersionMismatch = 812,
+
+ /**
+ * The operation is not permitted when the stream is capturing.
+ */
+ cudaErrorStreamCaptureUnsupported = 900,
+
+ /**
+ * The current capture sequence on the stream has been invalidated due to
+ * a previous error.
+ */
+ cudaErrorStreamCaptureInvalidated = 901,
+
+ /**
+ * The operation would have resulted in a merge of two independent capture
+ * sequences.
+ */
+ cudaErrorStreamCaptureMerge = 902,
+
+ /**
+ * The capture was not initiated in this stream.
+ */
+ cudaErrorStreamCaptureUnmatched = 903,
+
+ /**
+ * The capture sequence contains a fork that was not joined to the primary
+ * stream.
+ */
+ cudaErrorStreamCaptureUnjoined = 904,
+
+ /**
+ * A dependency would have been created which crosses the capture sequence
+ * boundary. Only implicit in-stream ordering dependencies are allowed to
+ * cross the boundary.
+ */
+ cudaErrorStreamCaptureIsolation = 905,
+
+ /**
+ * The operation would have resulted in a disallowed implicit dependency on
+ * a current capture sequence from cudaStreamLegacy.
+ */
+ cudaErrorStreamCaptureImplicit = 906,
+
+ /**
+ * The operation is not permitted on an event which was last recorded in a
+ * capturing stream.
+ */
+ cudaErrorCapturedEvent = 907,
+
+ /**
+ * A stream capture sequence not initiated with the ::cudaStreamCaptureModeRelaxed
+ * argument to ::cudaStreamBeginCapture was passed to ::cudaStreamEndCapture in a
+ * different thread.
+ */
+ cudaErrorStreamCaptureWrongThread = 908,
+
+ /**
+ * This indicates that the wait operation has timed out.
+ */
+ cudaErrorTimeout = 909,
+
+ /**
+ * This error indicates that the graph update was not performed because it included
+ * changes which violated constraints specific to instantiated graph update.
+ */
+ cudaErrorGraphExecUpdateFailure = 910,
+
+ /**
+ * This indicates that an async error has occurred in a device outside of CUDA.
+ * If CUDA was waiting for an external device's signal before consuming shared data,
+ * the external device signaled an error indicating that the data is not valid for
+ * consumption. This leaves the process in an inconsistent state and any further CUDA
+ * work will return the same error. To continue using CUDA, the process must be
+ * terminated and relaunched.
+ */
+ cudaErrorExternalDevice = 911,
+
+ /**
+ * This indicates that a kernel launch error has occurred due to cluster
+ * misconfiguration.
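+ *
+ * \par Example
+ * A minimal illustrative sketch of a launch that requests a cluster shape,
+ * assuming the launch-configuration API from the same toolkit; \p clusterKernel
+ * is a hypothetical parameterless __global__ function. The grid must be evenly
+ * divisible by the cluster shape and the cluster size must be supported by the
+ * device, otherwise the launch fails with this error:
+ * \code
+  cudaLaunchConfig_t cfg = {};
+  cfg.gridDim  = dim3(16, 1, 1);
+  cfg.blockDim = dim3(128, 1, 1);
+
+  cudaLaunchAttribute attr;
+  attr.id               = cudaLaunchAttributeClusterDimension;
+  attr.val.clusterDim.x = 2;   // 2 thread blocks per cluster along x
+  attr.val.clusterDim.y = 1;
+  attr.val.clusterDim.z = 1;
+  cfg.attrs    = &attr;
+  cfg.numAttrs = 1;
+
+  cudaError_t err = cudaLaunchKernelEx(&cfg, clusterKernel);
+ * \endcode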
+ */ + cudaErrorInvalidClusterSize = 912, + + /** + * This indicates that an unknown internal error has occurred. + */ + cudaErrorUnknown = 999, + + /** + * Any unhandled CUDA driver error is added to this value and returned via + * the runtime. Production releases of CUDA should not return such errors. + * \deprecated + * This error return is deprecated as of CUDA 4.1. + */ + cudaErrorApiFailureBase = 10000 +}; + +/** + * Channel format kind + */ +enum __device_builtin__ cudaChannelFormatKind +{ + cudaChannelFormatKindSigned = 0, /**< Signed channel format */ + cudaChannelFormatKindUnsigned = 1, /**< Unsigned channel format */ + cudaChannelFormatKindFloat = 2, /**< Float channel format */ + cudaChannelFormatKindNone = 3, /**< No channel format */ + cudaChannelFormatKindNV12 = 4, /**< Unsigned 8-bit integers, planar 4:2:0 YUV format */ + cudaChannelFormatKindUnsignedNormalized8X1 = 5, /**< 1 channel unsigned 8-bit normalized integer */ + cudaChannelFormatKindUnsignedNormalized8X2 = 6, /**< 2 channel unsigned 8-bit normalized integer */ + cudaChannelFormatKindUnsignedNormalized8X4 = 7, /**< 4 channel unsigned 8-bit normalized integer */ + cudaChannelFormatKindUnsignedNormalized16X1 = 8, /**< 1 channel unsigned 16-bit normalized integer */ + cudaChannelFormatKindUnsignedNormalized16X2 = 9, /**< 2 channel unsigned 16-bit normalized integer */ + cudaChannelFormatKindUnsignedNormalized16X4 = 10, /**< 4 channel unsigned 16-bit normalized integer */ + cudaChannelFormatKindSignedNormalized8X1 = 11, /**< 1 channel signed 8-bit normalized integer */ + cudaChannelFormatKindSignedNormalized8X2 = 12, /**< 2 channel signed 8-bit normalized integer */ + cudaChannelFormatKindSignedNormalized8X4 = 13, /**< 4 channel signed 8-bit normalized integer */ + cudaChannelFormatKindSignedNormalized16X1 = 14, /**< 1 channel signed 16-bit normalized integer */ + cudaChannelFormatKindSignedNormalized16X2 = 15, /**< 2 channel signed 16-bit normalized integer */ + cudaChannelFormatKindSignedNormalized16X4 = 16, /**< 4 channel signed 16-bit normalized integer */ + cudaChannelFormatKindUnsignedBlockCompressed1 = 17, /**< 4 channel unsigned normalized block-compressed (BC1 compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed1SRGB = 18, /**< 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding*/ + cudaChannelFormatKindUnsignedBlockCompressed2 = 19, /**< 4 channel unsigned normalized block-compressed (BC2 compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed2SRGB = 20, /**< 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding */ + cudaChannelFormatKindUnsignedBlockCompressed3 = 21, /**< 4 channel unsigned normalized block-compressed (BC3 compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed3SRGB = 22, /**< 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding */ + cudaChannelFormatKindUnsignedBlockCompressed4 = 23, /**< 1 channel unsigned normalized block-compressed (BC4 compression) format */ + cudaChannelFormatKindSignedBlockCompressed4 = 24, /**< 1 channel signed normalized block-compressed (BC4 compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed5 = 25, /**< 2 channel unsigned normalized block-compressed (BC5 compression) format */ + cudaChannelFormatKindSignedBlockCompressed5 = 26, /**< 2 channel signed normalized block-compressed (BC5 compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed6H = 27, /**< 3 channel unsigned 
half-float block-compressed (BC6H compression) format */ + cudaChannelFormatKindSignedBlockCompressed6H = 28, /**< 3 channel signed half-float block-compressed (BC6H compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed7 = 29, /**< 4 channel unsigned normalized block-compressed (BC7 compression) format */ + cudaChannelFormatKindUnsignedBlockCompressed7SRGB = 30 /**< 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding */ +}; + +/** + * CUDA Channel format descriptor + */ +struct __device_builtin__ cudaChannelFormatDesc +{ + int x; /**< x */ + int y; /**< y */ + int z; /**< z */ + int w; /**< w */ + enum cudaChannelFormatKind f; /**< Channel format kind */ +}; + +/** + * CUDA array + */ +typedef struct cudaArray *cudaArray_t; + +/** + * CUDA array (as source copy argument) + */ +typedef const struct cudaArray *cudaArray_const_t; + +struct cudaArray; + +/** + * CUDA mipmapped array + */ +typedef struct cudaMipmappedArray *cudaMipmappedArray_t; + +/** + * CUDA mipmapped array (as source argument) + */ +typedef const struct cudaMipmappedArray *cudaMipmappedArray_const_t; + +struct cudaMipmappedArray; + +/** + * Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers + */ +#define cudaArraySparsePropertiesSingleMipTail 0x1 + +/** + * Sparse CUDA array and CUDA mipmapped array properties + */ +struct __device_builtin__ cudaArraySparseProperties { + struct { + unsigned int width; /**< Tile width in elements */ + unsigned int height; /**< Tile height in elements */ + unsigned int depth; /**< Tile depth in elements */ + } tileExtent; + unsigned int miptailFirstLevel; /**< First mip level at which the mip tail begins */ + unsigned long long miptailSize; /**< Total size of the mip tail. */ + unsigned int flags; /**< Flags will either be zero or ::cudaArraySparsePropertiesSingleMipTail */ + unsigned int reserved[4]; +}; + +/** + * CUDA array and CUDA mipmapped array memory requirements + */ +struct __device_builtin__ cudaArrayMemoryRequirements { + size_t size; /**< Total size of the array. */ + size_t alignment; /**< Alignment necessary for mapping the array. */ + unsigned int reserved[4]; +}; + +/** + * CUDA memory types + */ +enum __device_builtin__ cudaMemoryType +{ + cudaMemoryTypeUnregistered = 0, /**< Unregistered memory */ + cudaMemoryTypeHost = 1, /**< Host memory */ + cudaMemoryTypeDevice = 2, /**< Device memory */ + cudaMemoryTypeManaged = 3 /**< Managed memory */ +}; + +/** + * CUDA memory copy types + */ +enum __device_builtin__ cudaMemcpyKind +{ + cudaMemcpyHostToHost = 0, /**< Host -> Host */ + cudaMemcpyHostToDevice = 1, /**< Host -> Device */ + cudaMemcpyDeviceToHost = 2, /**< Device -> Host */ + cudaMemcpyDeviceToDevice = 3, /**< Device -> Device */ + cudaMemcpyDefault = 4 /**< Direction of the transfer is inferred from the pointer values. 
Requires unified virtual addressing */
+};
+
+/**
+ * CUDA Pitched memory pointer
+ *
+ * \sa ::make_cudaPitchedPtr
+ */
+struct __device_builtin__ cudaPitchedPtr
+{
+ void *ptr; /**< Pointer to allocated memory */
+ size_t pitch; /**< Pitch of allocated memory in bytes */
+ size_t xsize; /**< Logical width of allocation in elements */
+ size_t ysize; /**< Logical height of allocation in elements */
+};
+
+/**
+ * CUDA extent
+ *
+ * \sa ::make_cudaExtent
+ */
+struct __device_builtin__ cudaExtent
+{
+ size_t width; /**< Width in elements when referring to array memory, in bytes when referring to linear memory */
+ size_t height; /**< Height in elements */
+ size_t depth; /**< Depth in elements */
+};
+
+/**
+ * CUDA 3D position
+ *
+ * \sa ::make_cudaPos
+ */
+struct __device_builtin__ cudaPos
+{
+ size_t x; /**< x */
+ size_t y; /**< y */
+ size_t z; /**< z */
+};
+
+/**
+ * CUDA 3D memory copying parameters
+ */
+struct __device_builtin__ cudaMemcpy3DParms
+{
+ cudaArray_t srcArray; /**< Source memory address */
+ struct cudaPos srcPos; /**< Source position offset */
+ struct cudaPitchedPtr srcPtr; /**< Pitched source memory address */
+
+ cudaArray_t dstArray; /**< Destination memory address */
+ struct cudaPos dstPos; /**< Destination position offset */
+ struct cudaPitchedPtr dstPtr; /**< Pitched destination memory address */
+
+ struct cudaExtent extent; /**< Requested memory copy size */
+ enum cudaMemcpyKind kind; /**< Type of transfer */
+};
+
+/**
+ * CUDA 3D cross-device memory copying parameters
+ */
+struct __device_builtin__ cudaMemcpy3DPeerParms
+{
+ cudaArray_t srcArray; /**< Source memory address */
+ struct cudaPos srcPos; /**< Source position offset */
+ struct cudaPitchedPtr srcPtr; /**< Pitched source memory address */
+ int srcDevice; /**< Source device */
+
+ cudaArray_t dstArray; /**< Destination memory address */
+ struct cudaPos dstPos; /**< Destination position offset */
+ struct cudaPitchedPtr dstPtr; /**< Pitched destination memory address */
+ int dstDevice; /**< Destination device */
+
+ struct cudaExtent extent; /**< Requested memory copy size */
+};
+
+/**
+ * CUDA Memset node parameters
+ */
+struct __device_builtin__ cudaMemsetParams {
+ void *dst; /**< Destination device pointer */
+ size_t pitch; /**< Pitch of destination device pointer. Unused if height is 1 */
+ unsigned int value; /**< Value to be set */
+ unsigned int elementSize; /**< Size of each element in bytes. Must be 1, 2, or 4. */
+ size_t width; /**< Width of the row in elements */
+ size_t height; /**< Number of rows */
+};
+
+/**
+ * Specifies performance hint with ::cudaAccessPolicyWindow for hitProp and missProp members.
+ */
+enum __device_builtin__ cudaAccessProperty {
+ cudaAccessPropertyNormal = 0, /**< Normal cache persistence. */
+ cudaAccessPropertyStreaming = 1, /**< Streaming access is less likely to persist in cache. */
+ cudaAccessPropertyPersisting = 2 /**< Persisting access is more likely to persist in cache. */
+};
+
+/**
+ * Specifies an access policy for a window, a contiguous extent of memory
+ * beginning at base_ptr and ending at base_ptr + num_bytes.
+ * The window is partitioned into many segments, and segments are assigned such that:
+ * sum of "hit segments" / window == approx. ratio,
+ * sum of "miss segments" / window == approx. 1 - ratio.
+ * Segments and ratio specifications are fitted to the capabilities of
+ * the architecture.
+ * Accesses in a hit segment apply the hitProp access policy.
+ * Accesses in a miss segment apply the missProp access policy.
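+ *
+ * \par Example
+ * A minimal illustrative sketch of attaching a window to a stream; \p stream,
+ * \p devPtr and \p numBytes are assumed placeholders for an existing stream
+ * and device allocation:
+ * \code
+  cudaStreamAttrValue attr = {};
+  attr.accessPolicyWindow.base_ptr  = devPtr;
+  attr.accessPolicyWindow.num_bytes = numBytes;
+  attr.accessPolicyWindow.hitRatio  = 0.6f;   // ~60% of lines get hitProp
+  attr.accessPolicyWindow.hitProp   = cudaAccessPropertyPersisting;
+  attr.accessPolicyWindow.missProp  = cudaAccessPropertyStreaming;
+  cudaStreamSetAttribute(stream, cudaStreamAttributeAccessPolicyWindow, &attr);
+ * \endcode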
+ */ +struct __device_builtin__ cudaAccessPolicyWindow { + void *base_ptr; /**< Starting address of the access policy window. CUDA driver may align it. */ + size_t num_bytes; /**< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment. */ + float hitRatio; /**< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp. */ + enum cudaAccessProperty hitProp; /**< ::CUaccessProperty set for hit. */ + enum cudaAccessProperty missProp; /**< ::CUaccessProperty set for miss. Must be either NORMAL or STREAMING. */ +}; + +#ifdef _WIN32 +#define CUDART_CB __stdcall +#else +#define CUDART_CB +#endif + +/** + * CUDA host function + * \param userData Argument value passed to the function + */ +typedef void (CUDART_CB *cudaHostFn_t)(void *userData); + +/** + * CUDA host node parameters + */ +struct __device_builtin__ cudaHostNodeParams { + cudaHostFn_t fn; /**< The function to call when the node executes */ + void* userData; /**< Argument to pass to the function */ +}; + +/** + * Possible stream capture statuses returned by ::cudaStreamIsCapturing + */ +enum __device_builtin__ cudaStreamCaptureStatus { + cudaStreamCaptureStatusNone = 0, /**< Stream is not capturing */ + cudaStreamCaptureStatusActive = 1, /**< Stream is actively capturing */ + cudaStreamCaptureStatusInvalidated = 2 /**< Stream is part of a capture sequence that + has been invalidated, but not terminated */ +}; + +/** + * Possible modes for stream capture thread interactions. For more details see + * ::cudaStreamBeginCapture and ::cudaThreadExchangeStreamCaptureMode + */ +enum __device_builtin__ cudaStreamCaptureMode { + cudaStreamCaptureModeGlobal = 0, + cudaStreamCaptureModeThreadLocal = 1, + cudaStreamCaptureModeRelaxed = 2 +}; + +enum __device_builtin__ cudaSynchronizationPolicy { + cudaSyncPolicyAuto = 1, + cudaSyncPolicySpin = 2, + cudaSyncPolicyYield = 3, + cudaSyncPolicyBlockingSync = 4 +}; + +/** + * Cluster scheduling policies. These may be passed to ::cudaFuncSetAttribute + */ +enum __device_builtin__ cudaClusterSchedulingPolicy { + cudaClusterSchedulingPolicyDefault = 0, /**< the default policy */ + cudaClusterSchedulingPolicySpread = 1, /**< spread the blocks within a cluster to the SMs */ + cudaClusterSchedulingPolicyLoadBalancing = 2 /**< allow the hardware to load-balance the blocks in a cluster to the SMs */ +}; + +/** + * Flags for ::cudaStreamUpdateCaptureDependencies + */ +enum __device_builtin__ cudaStreamUpdateCaptureDependenciesFlags { + cudaStreamAddCaptureDependencies = 0x0, /**< Add new nodes to the dependency set */ + cudaStreamSetCaptureDependencies = 0x1 /**< Replace the dependency set with the new nodes */ +}; + +/** + * Flags for user objects for graphs + */ +enum __device_builtin__ cudaUserObjectFlags { + cudaUserObjectNoDestructorSync = 0x1 /**< Indicates the destructor execution is not synchronized by any CUDA handle. */ +}; + +/** + * Flags for retaining user object references for graphs + */ +enum __device_builtin__ cudaUserObjectRetainFlags { + cudaGraphUserObjectMove = 0x1 /**< Transfer references from the caller rather than creating new references. 
*/ +}; + +/** + * CUDA graphics interop resource + */ +struct cudaGraphicsResource; + +/** + * CUDA graphics interop register flags + */ +enum __device_builtin__ cudaGraphicsRegisterFlags +{ + cudaGraphicsRegisterFlagsNone = 0, /**< Default */ + cudaGraphicsRegisterFlagsReadOnly = 1, /**< CUDA will not write to this resource */ + cudaGraphicsRegisterFlagsWriteDiscard = 2, /**< CUDA will only write to and will not read from this resource */ + cudaGraphicsRegisterFlagsSurfaceLoadStore = 4, /**< CUDA will bind this resource to a surface reference */ + cudaGraphicsRegisterFlagsTextureGather = 8 /**< CUDA will perform texture gather operations on this resource */ +}; + +/** + * CUDA graphics interop map flags + */ +enum __device_builtin__ cudaGraphicsMapFlags +{ + cudaGraphicsMapFlagsNone = 0, /**< Default; Assume resource can be read/written */ + cudaGraphicsMapFlagsReadOnly = 1, /**< CUDA will not write to this resource */ + cudaGraphicsMapFlagsWriteDiscard = 2 /**< CUDA will only write to and will not read from this resource */ +}; + +/** + * CUDA graphics interop array indices for cube maps + */ +enum __device_builtin__ cudaGraphicsCubeFace +{ + cudaGraphicsCubeFacePositiveX = 0x00, /**< Positive X face of cubemap */ + cudaGraphicsCubeFaceNegativeX = 0x01, /**< Negative X face of cubemap */ + cudaGraphicsCubeFacePositiveY = 0x02, /**< Positive Y face of cubemap */ + cudaGraphicsCubeFaceNegativeY = 0x03, /**< Negative Y face of cubemap */ + cudaGraphicsCubeFacePositiveZ = 0x04, /**< Positive Z face of cubemap */ + cudaGraphicsCubeFaceNegativeZ = 0x05 /**< Negative Z face of cubemap */ +}; + +/** + * CUDA resource types + */ +enum __device_builtin__ cudaResourceType +{ + cudaResourceTypeArray = 0x00, /**< Array resource */ + cudaResourceTypeMipmappedArray = 0x01, /**< Mipmapped array resource */ + cudaResourceTypeLinear = 0x02, /**< Linear resource */ + cudaResourceTypePitch2D = 0x03 /**< Pitch 2D resource */ +}; + +/** + * CUDA texture resource view formats + */ +enum __device_builtin__ cudaResourceViewFormat +{ + cudaResViewFormatNone = 0x00, /**< No resource view format (use underlying resource format) */ + cudaResViewFormatUnsignedChar1 = 0x01, /**< 1 channel unsigned 8-bit integers */ + cudaResViewFormatUnsignedChar2 = 0x02, /**< 2 channel unsigned 8-bit integers */ + cudaResViewFormatUnsignedChar4 = 0x03, /**< 4 channel unsigned 8-bit integers */ + cudaResViewFormatSignedChar1 = 0x04, /**< 1 channel signed 8-bit integers */ + cudaResViewFormatSignedChar2 = 0x05, /**< 2 channel signed 8-bit integers */ + cudaResViewFormatSignedChar4 = 0x06, /**< 4 channel signed 8-bit integers */ + cudaResViewFormatUnsignedShort1 = 0x07, /**< 1 channel unsigned 16-bit integers */ + cudaResViewFormatUnsignedShort2 = 0x08, /**< 2 channel unsigned 16-bit integers */ + cudaResViewFormatUnsignedShort4 = 0x09, /**< 4 channel unsigned 16-bit integers */ + cudaResViewFormatSignedShort1 = 0x0a, /**< 1 channel signed 16-bit integers */ + cudaResViewFormatSignedShort2 = 0x0b, /**< 2 channel signed 16-bit integers */ + cudaResViewFormatSignedShort4 = 0x0c, /**< 4 channel signed 16-bit integers */ + cudaResViewFormatUnsignedInt1 = 0x0d, /**< 1 channel unsigned 32-bit integers */ + cudaResViewFormatUnsignedInt2 = 0x0e, /**< 2 channel unsigned 32-bit integers */ + cudaResViewFormatUnsignedInt4 = 0x0f, /**< 4 channel unsigned 32-bit integers */ + cudaResViewFormatSignedInt1 = 0x10, /**< 1 channel signed 32-bit integers */ + cudaResViewFormatSignedInt2 = 0x11, /**< 2 channel signed 32-bit integers */ + 
cudaResViewFormatSignedInt4 = 0x12, /**< 4 channel signed 32-bit integers */ + cudaResViewFormatHalf1 = 0x13, /**< 1 channel 16-bit floating point */ + cudaResViewFormatHalf2 = 0x14, /**< 2 channel 16-bit floating point */ + cudaResViewFormatHalf4 = 0x15, /**< 4 channel 16-bit floating point */ + cudaResViewFormatFloat1 = 0x16, /**< 1 channel 32-bit floating point */ + cudaResViewFormatFloat2 = 0x17, /**< 2 channel 32-bit floating point */ + cudaResViewFormatFloat4 = 0x18, /**< 4 channel 32-bit floating point */ + cudaResViewFormatUnsignedBlockCompressed1 = 0x19, /**< Block compressed 1 */ + cudaResViewFormatUnsignedBlockCompressed2 = 0x1a, /**< Block compressed 2 */ + cudaResViewFormatUnsignedBlockCompressed3 = 0x1b, /**< Block compressed 3 */ + cudaResViewFormatUnsignedBlockCompressed4 = 0x1c, /**< Block compressed 4 unsigned */ + cudaResViewFormatSignedBlockCompressed4 = 0x1d, /**< Block compressed 4 signed */ + cudaResViewFormatUnsignedBlockCompressed5 = 0x1e, /**< Block compressed 5 unsigned */ + cudaResViewFormatSignedBlockCompressed5 = 0x1f, /**< Block compressed 5 signed */ + cudaResViewFormatUnsignedBlockCompressed6H = 0x20, /**< Block compressed 6 unsigned half-float */ + cudaResViewFormatSignedBlockCompressed6H = 0x21, /**< Block compressed 6 signed half-float */ + cudaResViewFormatUnsignedBlockCompressed7 = 0x22 /**< Block compressed 7 */ +}; + +/** + * CUDA resource descriptor + */ +struct __device_builtin__ cudaResourceDesc { + enum cudaResourceType resType; /**< Resource type */ + + union { + struct { + cudaArray_t array; /**< CUDA array */ + } array; + struct { + cudaMipmappedArray_t mipmap; /**< CUDA mipmapped array */ + } mipmap; + struct { + void *devPtr; /**< Device pointer */ + struct cudaChannelFormatDesc desc; /**< Channel descriptor */ + size_t sizeInBytes; /**< Size in bytes */ + } linear; + struct { + void *devPtr; /**< Device pointer */ + struct cudaChannelFormatDesc desc; /**< Channel descriptor */ + size_t width; /**< Width of the array in elements */ + size_t height; /**< Height of the array in elements */ + size_t pitchInBytes; /**< Pitch between two rows in bytes */ + } pitch2D; + } res; +}; + +/** + * CUDA resource view descriptor + */ +struct __device_builtin__ cudaResourceViewDesc +{ + enum cudaResourceViewFormat format; /**< Resource view format */ + size_t width; /**< Width of the resource view */ + size_t height; /**< Height of the resource view */ + size_t depth; /**< Depth of the resource view */ + unsigned int firstMipmapLevel; /**< First defined mipmap level */ + unsigned int lastMipmapLevel; /**< Last defined mipmap level */ + unsigned int firstLayer; /**< First layer index */ + unsigned int lastLayer; /**< Last layer index */ +}; + +/** + * CUDA pointer attributes + */ +struct __device_builtin__ cudaPointerAttributes +{ + /** + * The type of memory - ::cudaMemoryTypeUnregistered, ::cudaMemoryTypeHost, + * ::cudaMemoryTypeDevice or ::cudaMemoryTypeManaged. + */ + enum cudaMemoryType type; + + /** + * The device against which the memory was allocated or registered. + * If the memory type is ::cudaMemoryTypeDevice then this identifies + * the device on which the memory referred physically resides. If + * the memory type is ::cudaMemoryTypeHost or::cudaMemoryTypeManaged then + * this identifies the device which was current when the memory was allocated + * or registered (and if that device is deinitialized then this allocation + * will vanish with that device's state). 
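+ *
+ * \par Example
+ * A minimal illustrative sketch of querying this structure; \p ptr is an
+ * assumed placeholder for a pointer previously handed to CUDA:
+ * \code
+  cudaPointerAttributes pa;
+  if (cudaPointerGetAttributes(&pa, ptr) == cudaSuccess &&
+      (pa.type == cudaMemoryTypeDevice || pa.type == cudaMemoryTypeManaged)) {
+      printf("memory is associated with device %d\n", pa.device);
+  }
+ * \endcode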
+ */ + int device; + + /** + * The address which may be dereferenced on the current device to access + * the memory or NULL if no such address exists. + */ + void *devicePointer; + + /** + * The address which may be dereferenced on the host to access the + * memory or NULL if no such address exists. + * + * \note CUDA doesn't check if unregistered memory is allocated so this field + * may contain invalid pointer if an invalid pointer has been passed to CUDA. + */ + void *hostPointer; +}; + +/** + * CUDA function attributes + */ +struct __device_builtin__ cudaFuncAttributes +{ + /** + * The size in bytes of statically-allocated shared memory per block + * required by this function. This does not include dynamically-allocated + * shared memory requested by the user at runtime. + */ + size_t sharedSizeBytes; + + /** + * The size in bytes of user-allocated constant memory required by this + * function. + */ + size_t constSizeBytes; + + /** + * The size in bytes of local memory used by each thread of this function. + */ + size_t localSizeBytes; + + /** + * The maximum number of threads per block, beyond which a launch of the + * function would fail. This number depends on both the function and the + * device on which the function is currently loaded. + */ + int maxThreadsPerBlock; + + /** + * The number of registers used by each thread of this function. + */ + int numRegs; + + /** + * The PTX virtual architecture version for which the function was + * compiled. This value is the major PTX version * 10 + the minor PTX + * version, so a PTX version 1.3 function would return the value 13. + */ + int ptxVersion; + + /** + * The binary architecture version for which the function was compiled. + * This value is the major binary version * 10 + the minor binary version, + * so a binary version 1.3 function would return the value 13. + */ + int binaryVersion; + + /** + * The attribute to indicate whether the function has been compiled with + * user specified option "-Xptxas --dlcm=ca" set. + */ + int cacheModeCA; + + /** + * The maximum size in bytes of dynamic shared memory per block for + * this function. Any launch must have a dynamic shared memory size + * smaller than this value. + */ + int maxDynamicSharedSizeBytes; + + /** + * On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the maximum shared memory. + * Refer to ::cudaDevAttrMaxSharedMemoryPerMultiprocessor. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * See ::cudaFuncSetAttribute + */ + int preferredShmemCarveout; + + /** + * If this attribute is set, the kernel must launch with a valid cluster dimension + * specified. + */ + int clusterDimMustBeSet; + + /** + * The required cluster width/height/depth in blocks. The values must either + * all be 0 or all be positive. The validity of the cluster dimensions is + * otherwise checked at launch time. + * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime should return cudaErrorNotPermitted. + * See ::cudaFuncSetAttribute + */ + int requiredClusterWidth; + int requiredClusterHeight; + int requiredClusterDepth; + + /** + * The block scheduling policy of a function. + * See ::cudaFuncSetAttribute + */ + int clusterSchedulingPolicyPreference; + + /** + * Whether the function can be launched with non-portable cluster size. 1 is + * allowed, 0 is disallowed. 
A non-portable cluster size may only function
+ * on the specific SKUs the program is tested on. The launch might fail if
+ * the program is run on a different hardware platform.
+ *
+ * CUDA API provides ::cudaOccupancyMaxActiveClusters to assist with checking
+ * whether the desired size can be launched on the current device.
+ *
+ * Portable Cluster Size
+ *
+ * A portable cluster size is guaranteed to be functional on all compute
+ * capabilities higher than the target compute capability. The portable
+ * cluster size for sm_90 is 8 blocks per cluster. This value may increase
+ * for future compute capabilities.
+ *
+ * Specific hardware may support cluster sizes higher than the portable
+ * size, but those sizes are not guaranteed to be portable.
+ * See ::cudaFuncSetAttribute
+ */
+ int nonPortableClusterSizeAllowed;
+
+ /**
+ * Reserved for future use.
+ */
+ int reserved[16];
+};
+
+/**
+ * CUDA function attributes that can be set using ::cudaFuncSetAttribute
+ */
+enum __device_builtin__ cudaFuncAttribute
+{
+ cudaFuncAttributeMaxDynamicSharedMemorySize = 8, /**< Maximum dynamic shared memory size */
+ cudaFuncAttributePreferredSharedMemoryCarveout = 9, /**< Preferred shared memory-L1 cache split */
+ cudaFuncAttributeClusterDimMustBeSet = 10, /**< Indicator to enforce valid cluster dimension specification on kernel launch */
+ cudaFuncAttributeRequiredClusterWidth = 11, /**< Required cluster width */
+ cudaFuncAttributeRequiredClusterHeight = 12, /**< Required cluster height */
+ cudaFuncAttributeRequiredClusterDepth = 13, /**< Required cluster depth */
+ cudaFuncAttributeNonPortableClusterSizeAllowed = 14, /**< Whether a non-portable cluster size is allowed */
+ cudaFuncAttributeClusterSchedulingPolicyPreference = 15, /**< Required cluster scheduling policy preference */
+ cudaFuncAttributeMax
+};
+
+/**
+ * CUDA function cache configurations
+ */
+enum __device_builtin__ cudaFuncCache
+{
+ cudaFuncCachePreferNone = 0, /**< Default function cache configuration, no preference */
+ cudaFuncCachePreferShared = 1, /**< Prefer larger shared memory and smaller L1 cache */
+ cudaFuncCachePreferL1 = 2, /**< Prefer larger L1 cache and smaller shared memory */
+ cudaFuncCachePreferEqual = 3 /**< Prefer equal size L1 cache and shared memory */
+};
+
+/**
+ * CUDA shared memory configuration
+ */
+
+enum __device_builtin__ cudaSharedMemConfig
+{
+ cudaSharedMemBankSizeDefault = 0,
+ cudaSharedMemBankSizeFourByte = 1,
+ cudaSharedMemBankSizeEightByte = 2
+};
+
+/**
+ * Shared memory carveout configurations. These may be passed to cudaFuncSetAttribute
+ */
+enum __device_builtin__ cudaSharedCarveout {
+ cudaSharedmemCarveoutDefault = -1, /**< No preference for shared memory or L1 (default) */
+ cudaSharedmemCarveoutMaxShared = 100, /**< Prefer maximum available shared memory, minimum L1 cache */
+ cudaSharedmemCarveoutMaxL1 = 0 /**< Prefer maximum available L1 cache, minimum shared memory */
+};
+
+/**
+ * CUDA device compute modes
+ */
+enum __device_builtin__ cudaComputeMode
+{
+ cudaComputeModeDefault = 0, /**< Default compute mode (Multiple threads can use ::cudaSetDevice() with this device) */
+ cudaComputeModeExclusive = 1, /**< Compute-exclusive-thread mode (Only one thread in one process will be able to use ::cudaSetDevice() with this device) */
+ cudaComputeModeProhibited = 2, /**< Compute-prohibited mode (No threads can use ::cudaSetDevice() with this device) */
+ cudaComputeModeExclusiveProcess = 3 /**< Compute-exclusive-process mode (Many threads in one process will be able to use ::cudaSetDevice() with this device) */
+};
+
+/**
+ * CUDA Limits
+ */
+enum __device_builtin__ cudaLimit
+{
+ cudaLimitStackSize = 0x00, /**< GPU thread stack size */
+ cudaLimitPrintfFifoSize = 0x01, /**< GPU printf FIFO size */
+ cudaLimitMallocHeapSize = 0x02, /**< GPU malloc heap size */
+ cudaLimitDevRuntimeSyncDepth = 0x03, /**< GPU device runtime synchronize depth */
+ cudaLimitDevRuntimePendingLaunchCount = 0x04, /**< GPU device runtime pending launch count */
+ cudaLimitMaxL2FetchGranularity = 0x05, /**< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint */
+ cudaLimitPersistingL2CacheSize = 0x06 /**< A size in bytes for L2 persisting lines cache size */
+};
+
+/**
+ * CUDA Memory Advise values
+ */
+enum __device_builtin__ cudaMemoryAdvise
+{
+ cudaMemAdviseSetReadMostly = 1, /**< Data will mostly be read and only occasionally be written to */
+ cudaMemAdviseUnsetReadMostly = 2, /**< Undo the effect of ::cudaMemAdviseSetReadMostly */
+ cudaMemAdviseSetPreferredLocation = 3, /**< Set the preferred location for the data as the specified device */
+ cudaMemAdviseUnsetPreferredLocation = 4, /**< Clear the preferred location for the data */
+ cudaMemAdviseSetAccessedBy = 5, /**< Data will be accessed by the specified device, so prevent page faults as much as possible */
+ cudaMemAdviseUnsetAccessedBy = 6 /**< Let the Unified Memory subsystem decide on the page faulting policy for the specified device */
+};
+
+/**
+ * CUDA range attributes
+ */
+enum __device_builtin__ cudaMemRangeAttribute
+{
+ cudaMemRangeAttributeReadMostly = 1, /**< Whether the range will mostly be read and only occasionally be written to */
+ cudaMemRangeAttributePreferredLocation = 2, /**< The preferred location of the range */
+ cudaMemRangeAttributeAccessedBy = 3, /**< Memory range has ::cudaMemAdviseSetAccessedBy set for specified device */
+ cudaMemRangeAttributeLastPrefetchLocation = 4 /**< The last location to which the range was prefetched */
+};
+
+/**
+ * CUDA GPUDirect RDMA flush writes APIs supported on the device
+ */
+enum __device_builtin__ cudaFlushGPUDirectRDMAWritesOptions {
+ cudaFlushGPUDirectRDMAWritesOptionHost = 1<<0, /**< ::cudaDeviceFlushGPUDirectRDMAWrites() and its CUDA Driver API counterpart are supported on the device. */
+ cudaFlushGPUDirectRDMAWritesOptionMemOps = 1<<1 /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the CUDA device.
*/ +}; + +/** + * CUDA GPUDirect RDMA flush writes ordering features of the device + */ +enum __device_builtin__ cudaGPUDirectRDMAWritesOrdering { + cudaGPUDirectRDMAWritesOrderingNone = 0, /**< The device does not natively support ordering of GPUDirect RDMA writes. ::cudaFlushGPUDirectRDMAWrites() can be leveraged if supported. */ + cudaGPUDirectRDMAWritesOrderingOwner = 100, /**< Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not. */ + cudaGPUDirectRDMAWritesOrderingAllDevices = 200 /**< Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device. */ +}; + +/** + * CUDA GPUDirect RDMA flush writes scopes + */ +enum __device_builtin__ cudaFlushGPUDirectRDMAWritesScope { + cudaFlushGPUDirectRDMAWritesToOwner = 100, /**< Blocks until remote writes are visible to the CUDA device context owning the data. */ + cudaFlushGPUDirectRDMAWritesToAllDevices = 200 /**< Blocks until remote writes are visible to all CUDA device contexts. */ +}; + +/** + * CUDA GPUDirect RDMA flush writes targets + */ +enum __device_builtin__ cudaFlushGPUDirectRDMAWritesTarget { + cudaFlushGPUDirectRDMAWritesTargetCurrentDevice /**< Sets the target for ::cudaDeviceFlushGPUDirectRDMAWrites() to the currently active CUDA device context. */ +}; + + +/** + * CUDA device attributes + */ +enum __device_builtin__ cudaDeviceAttr +{ + cudaDevAttrMaxThreadsPerBlock = 1, /**< Maximum number of threads per block */ + cudaDevAttrMaxBlockDimX = 2, /**< Maximum block dimension X */ + cudaDevAttrMaxBlockDimY = 3, /**< Maximum block dimension Y */ + cudaDevAttrMaxBlockDimZ = 4, /**< Maximum block dimension Z */ + cudaDevAttrMaxGridDimX = 5, /**< Maximum grid dimension X */ + cudaDevAttrMaxGridDimY = 6, /**< Maximum grid dimension Y */ + cudaDevAttrMaxGridDimZ = 7, /**< Maximum grid dimension Z */ + cudaDevAttrMaxSharedMemoryPerBlock = 8, /**< Maximum shared memory available per block in bytes */ + cudaDevAttrTotalConstantMemory = 9, /**< Memory available on device for __constant__ variables in a CUDA C kernel in bytes */ + cudaDevAttrWarpSize = 10, /**< Warp size in threads */ + cudaDevAttrMaxPitch = 11, /**< Maximum pitch in bytes allowed by memory copies */ + cudaDevAttrMaxRegistersPerBlock = 12, /**< Maximum number of 32-bit registers available per block */ + cudaDevAttrClockRate = 13, /**< Peak clock frequency in kilohertz */ + cudaDevAttrTextureAlignment = 14, /**< Alignment requirement for textures */ + cudaDevAttrGpuOverlap = 15, /**< Device can possibly copy memory and execute a kernel concurrently */ + cudaDevAttrMultiProcessorCount = 16, /**< Number of multiprocessors on device */ + cudaDevAttrKernelExecTimeout = 17, /**< Specifies whether there is a run time limit on kernels */ + cudaDevAttrIntegrated = 18, /**< Device is integrated with host memory */ + cudaDevAttrCanMapHostMemory = 19, /**< Device can map host memory into CUDA address space */ + cudaDevAttrComputeMode = 20, /**< Compute mode (See ::cudaComputeMode for details) */ + cudaDevAttrMaxTexture1DWidth = 21, /**< Maximum 1D texture width */ + cudaDevAttrMaxTexture2DWidth = 22, /**< Maximum 2D texture width */ + cudaDevAttrMaxTexture2DHeight = 23, /**< Maximum 2D texture height */ + cudaDevAttrMaxTexture3DWidth = 24, /**< Maximum 3D texture width */ + cudaDevAttrMaxTexture3DHeight = 25, /**< Maximum 3D texture height */ + cudaDevAttrMaxTexture3DDepth = 26, /**< Maximum 3D texture depth */ + cudaDevAttrMaxTexture2DLayeredWidth = 27, /**< Maximum 2D layered texture width */ + 
cudaDevAttrMaxTexture2DLayeredHeight = 28, /**< Maximum 2D layered texture height */ + cudaDevAttrMaxTexture2DLayeredLayers = 29, /**< Maximum layers in a 2D layered texture */ + cudaDevAttrSurfaceAlignment = 30, /**< Alignment requirement for surfaces */ + cudaDevAttrConcurrentKernels = 31, /**< Device can possibly execute multiple kernels concurrently */ + cudaDevAttrEccEnabled = 32, /**< Device has ECC support enabled */ + cudaDevAttrPciBusId = 33, /**< PCI bus ID of the device */ + cudaDevAttrPciDeviceId = 34, /**< PCI device ID of the device */ + cudaDevAttrTccDriver = 35, /**< Device is using TCC driver model */ + cudaDevAttrMemoryClockRate = 36, /**< Peak memory clock frequency in kilohertz */ + cudaDevAttrGlobalMemoryBusWidth = 37, /**< Global memory bus width in bits */ + cudaDevAttrL2CacheSize = 38, /**< Size of L2 cache in bytes */ + cudaDevAttrMaxThreadsPerMultiProcessor = 39, /**< Maximum resident threads per multiprocessor */ + cudaDevAttrAsyncEngineCount = 40, /**< Number of asynchronous engines */ + cudaDevAttrUnifiedAddressing = 41, /**< Device shares a unified address space with the host */ + cudaDevAttrMaxTexture1DLayeredWidth = 42, /**< Maximum 1D layered texture width */ + cudaDevAttrMaxTexture1DLayeredLayers = 43, /**< Maximum layers in a 1D layered texture */ + cudaDevAttrMaxTexture2DGatherWidth = 45, /**< Maximum 2D texture width if cudaArrayTextureGather is set */ + cudaDevAttrMaxTexture2DGatherHeight = 46, /**< Maximum 2D texture height if cudaArrayTextureGather is set */ + cudaDevAttrMaxTexture3DWidthAlt = 47, /**< Alternate maximum 3D texture width */ + cudaDevAttrMaxTexture3DHeightAlt = 48, /**< Alternate maximum 3D texture height */ + cudaDevAttrMaxTexture3DDepthAlt = 49, /**< Alternate maximum 3D texture depth */ + cudaDevAttrPciDomainId = 50, /**< PCI domain ID of the device */ + cudaDevAttrTexturePitchAlignment = 51, /**< Pitch alignment requirement for textures */ + cudaDevAttrMaxTextureCubemapWidth = 52, /**< Maximum cubemap texture width/height */ + cudaDevAttrMaxTextureCubemapLayeredWidth = 53, /**< Maximum cubemap layered texture width/height */ + cudaDevAttrMaxTextureCubemapLayeredLayers = 54, /**< Maximum layers in a cubemap layered texture */ + cudaDevAttrMaxSurface1DWidth = 55, /**< Maximum 1D surface width */ + cudaDevAttrMaxSurface2DWidth = 56, /**< Maximum 2D surface width */ + cudaDevAttrMaxSurface2DHeight = 57, /**< Maximum 2D surface height */ + cudaDevAttrMaxSurface3DWidth = 58, /**< Maximum 3D surface width */ + cudaDevAttrMaxSurface3DHeight = 59, /**< Maximum 3D surface height */ + cudaDevAttrMaxSurface3DDepth = 60, /**< Maximum 3D surface depth */ + cudaDevAttrMaxSurface1DLayeredWidth = 61, /**< Maximum 1D layered surface width */ + cudaDevAttrMaxSurface1DLayeredLayers = 62, /**< Maximum layers in a 1D layered surface */ + cudaDevAttrMaxSurface2DLayeredWidth = 63, /**< Maximum 2D layered surface width */ + cudaDevAttrMaxSurface2DLayeredHeight = 64, /**< Maximum 2D layered surface height */ + cudaDevAttrMaxSurface2DLayeredLayers = 65, /**< Maximum layers in a 2D layered surface */ + cudaDevAttrMaxSurfaceCubemapWidth = 66, /**< Maximum cubemap surface width */ + cudaDevAttrMaxSurfaceCubemapLayeredWidth = 67, /**< Maximum cubemap layered surface width */ + cudaDevAttrMaxSurfaceCubemapLayeredLayers = 68, /**< Maximum layers in a cubemap layered surface */ + cudaDevAttrMaxTexture1DLinearWidth = 69, /**< Maximum 1D linear texture width */ + cudaDevAttrMaxTexture2DLinearWidth = 70, /**< Maximum 2D linear texture width */ + 
cudaDevAttrMaxTexture2DLinearHeight = 71, /**< Maximum 2D linear texture height */ + cudaDevAttrMaxTexture2DLinearPitch = 72, /**< Maximum 2D linear texture pitch in bytes */ + cudaDevAttrMaxTexture2DMipmappedWidth = 73, /**< Maximum mipmapped 2D texture width */ + cudaDevAttrMaxTexture2DMipmappedHeight = 74, /**< Maximum mipmapped 2D texture height */ + cudaDevAttrComputeCapabilityMajor = 75, /**< Major compute capability version number */ + cudaDevAttrComputeCapabilityMinor = 76, /**< Minor compute capability version number */ + cudaDevAttrMaxTexture1DMipmappedWidth = 77, /**< Maximum mipmapped 1D texture width */ + cudaDevAttrStreamPrioritiesSupported = 78, /**< Device supports stream priorities */ + cudaDevAttrGlobalL1CacheSupported = 79, /**< Device supports caching globals in L1 */ + cudaDevAttrLocalL1CacheSupported = 80, /**< Device supports caching locals in L1 */ + cudaDevAttrMaxSharedMemoryPerMultiprocessor = 81, /**< Maximum shared memory available per multiprocessor in bytes */ + cudaDevAttrMaxRegistersPerMultiprocessor = 82, /**< Maximum number of 32-bit registers available per multiprocessor */ + cudaDevAttrManagedMemory = 83, /**< Device can allocate managed memory on this system */ + cudaDevAttrIsMultiGpuBoard = 84, /**< Device is on a multi-GPU board */ + cudaDevAttrMultiGpuBoardGroupID = 85, /**< Unique identifier for a group of devices on the same multi-GPU board */ + cudaDevAttrHostNativeAtomicSupported = 86, /**< Link between the device and the host supports native atomic operations */ + cudaDevAttrSingleToDoublePrecisionPerfRatio = 87, /**< Ratio of single precision performance (in floating-point operations per second) to double precision performance */ + cudaDevAttrPageableMemoryAccess = 88, /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */ + cudaDevAttrConcurrentManagedAccess = 89, /**< Device can coherently access managed memory concurrently with the CPU */ + cudaDevAttrComputePreemptionSupported = 90, /**< Device supports Compute Preemption */ + cudaDevAttrCanUseHostPointerForRegisteredMem = 91, /**< Device can access host registered memory at the same virtual address as the CPU */ + cudaDevAttrReserved92 = 92, + cudaDevAttrReserved93 = 93, + cudaDevAttrReserved94 = 94, + cudaDevAttrCooperativeLaunch = 95, /**< Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel*/ + cudaDevAttrCooperativeMultiDeviceLaunch = 96, /**< Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. */ + cudaDevAttrMaxSharedMemoryPerBlockOptin = 97, /**< The maximum optin shared memory per block. This value may vary by chip. See ::cudaFuncSetAttribute */ + cudaDevAttrCanFlushRemoteWrites = 98, /**< Device supports flushing of outstanding remote writes. */ + cudaDevAttrHostRegisterSupported = 99, /**< Device supports host memory registration via ::cudaHostRegister. */ + cudaDevAttrPageableMemoryAccessUsesHostPageTables = 100, /**< Device accesses pageable memory via the host's page tables. */ + cudaDevAttrDirectManagedMemAccessFromHost = 101, /**< Host can directly access managed memory on the device without migration. */ + cudaDevAttrMaxBlocksPerMultiprocessor = 106, /**< Maximum number of blocks per multiprocessor */ + cudaDevAttrMaxPersistingL2CacheSize = 108, /**< Maximum L2 persisting lines capacity setting in bytes. */ + cudaDevAttrMaxAccessPolicyWindowSize = 109, /**< Maximum value of cudaAccessPolicyWindow::num_bytes. 
*/ + cudaDevAttrReservedSharedMemoryPerBlock = 111, /**< Shared memory reserved by CUDA driver per block in bytes */ + cudaDevAttrSparseCudaArraySupported = 112, /**< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays */ + cudaDevAttrHostRegisterReadOnlySupported = 113, /**< Device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU */ + cudaDevAttrTimelineSemaphoreInteropSupported = 114, /**< External timeline semaphore interop is supported on the device */ + cudaDevAttrMaxTimelineSemaphoreInteropSupported = 114, /**< Deprecated, External timeline semaphore interop is supported on the device */ + cudaDevAttrMemoryPoolsSupported = 115, /**< Device supports using the ::cudaMallocAsync and ::cudaMemPool family of APIs */ + cudaDevAttrGPUDirectRDMASupported = 116, /**< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) */ + cudaDevAttrGPUDirectRDMAFlushWritesOptions = 117, /**< The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the ::cudaFlushGPUDirectRDMAWritesOptions enum */ + cudaDevAttrGPUDirectRDMAWritesOrdering = 118, /**< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::cudaGPUDirectRDMAWritesOrdering for the numerical values returned here. */ + cudaDevAttrMemoryPoolSupportedHandleTypes = 119, /**< Handle types supported with mempool based IPC */ + cudaDevAttrClusterLaunch = 120, /**< Indicates device supports cluster launch */ + cudaDevAttrDeferredMappingCudaArraySupported = 121, /**< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays */ + cudaDevAttrReserved122 = 122, + cudaDevAttrReserved123 = 123, + cudaDevAttrReserved124 = 124, + cudaDevAttrIpcEventSupport = 125, /**< Device supports IPC Events. */ + cudaDevAttrMemSyncDomainCount = 126, /**< Number of memory synchronization domains the device supports. */ + cudaDevAttrReserved127 = 127, + cudaDevAttrReserved128 = 128, + cudaDevAttrReserved129 = 129, + cudaDevAttrReserved132 = 132, + cudaDevAttrMax +}; + +/** + * CUDA memory pool attributes + */ +enum __device_builtin__ cudaMemPoolAttr +{ + /** + * (value type = int) + * Allow cuMemAllocAsync to use memory asynchronously freed + * in another streams as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + */ + cudaMemPoolReuseFollowEventDependencies = 0x1, + + /** + * (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + */ + cudaMemPoolReuseAllowOpportunistic = 0x2, + + /** + * (value type = int) + * Allow cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by cuFreeAsync (default enabled). + */ + cudaMemPoolReuseAllowInternalDependencies = 0x3, + + + /** + * (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. 
(default 0)
+ */
+ cudaMemPoolAttrReleaseThreshold = 0x4,
+
+ /**
+ * (value type = cuuint64_t)
+ * Amount of backing memory currently allocated for the mempool.
+ */
+ cudaMemPoolAttrReservedMemCurrent = 0x5,
+
+ /**
+ * (value type = cuuint64_t)
+ * High watermark of backing memory allocated for the mempool since the
+ * last time it was reset. High watermark can only be reset to zero.
+ */
+ cudaMemPoolAttrReservedMemHigh = 0x6,
+
+ /**
+ * (value type = cuuint64_t)
+ * Amount of memory from the pool that is currently in use by the application.
+ */
+ cudaMemPoolAttrUsedMemCurrent = 0x7,
+
+ /**
+ * (value type = cuuint64_t)
+ * High watermark of the amount of memory from the pool that was in use by the application since
+ * the last time it was reset. High watermark can only be reset to zero.
+ */
+ cudaMemPoolAttrUsedMemHigh = 0x8
+};
+
+/**
+ * Specifies the type of location
+ */
+enum __device_builtin__ cudaMemLocationType {
+ cudaMemLocationTypeInvalid = 0,
+ cudaMemLocationTypeDevice = 1 /**< Location is a device location, thus id is a device ordinal */
+};
+
+/**
+ * Specifies a memory location.
+ *
+ * To specify a GPU, set type = ::cudaMemLocationTypeDevice and set id = the GPU's device ordinal.
+ */
+struct __device_builtin__ cudaMemLocation {
+ enum cudaMemLocationType type; /**< Specifies the location type, which modifies the meaning of id. */
+ int id; /**< Identifier for the location; its meaning is determined by this location's ::cudaMemLocationType. */
+};
+
+/**
+ * Specifies the memory protection flags for mapping.
+ */
+enum __device_builtin__ cudaMemAccessFlags {
+ cudaMemAccessFlagsProtNone = 0, /**< Default, make the address range not accessible */
+ cudaMemAccessFlagsProtRead = 1, /**< Make the address range read accessible */
+ cudaMemAccessFlagsProtReadWrite = 3 /**< Make the address range read-write accessible */
+};
+
+/**
+ * Memory access descriptor
+ */
+struct __device_builtin__ cudaMemAccessDesc {
+ struct cudaMemLocation location; /**< Location on which the request is to change its accessibility */
+ enum cudaMemAccessFlags flags; /**< ::CUmemProt accessibility flags to set on the request */
+};
+
+/**
+ * Defines the allocation types available
+ */
+enum __device_builtin__ cudaMemAllocationType {
+ cudaMemAllocationTypeInvalid = 0x0,
+ /** This allocation type is 'pinned', i.e. cannot migrate from its current
+ * location while the application is actively using it
+ */
+ cudaMemAllocationTypePinned = 0x1,
+ cudaMemAllocationTypeMax = 0x7FFFFFFF
+};
+
+/**
+ * Flags for specifying particular handle types
+ */
+enum __device_builtin__ cudaMemAllocationHandleType {
+ cudaMemHandleTypeNone = 0x0, /**< Does not allow any export mechanism. */
+ cudaMemHandleTypePosixFileDescriptor = 0x1, /**< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) */
+ cudaMemHandleTypeWin32 = 0x2, /**< Allows a Win32 NT handle to be used for exporting. (HANDLE) */
+ cudaMemHandleTypeWin32Kmt = 0x4 /**< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) */
+};
+
+/**
+ * Specifies the properties of allocations made from the pool.
+ */
+struct __device_builtin__ cudaMemPoolProps {
+ enum cudaMemAllocationType allocType; /**< Allocation type. Currently must be specified as cudaMemAllocationTypePinned */
+ enum cudaMemAllocationHandleType handleTypes; /**< Handle types that will be supported by allocations from the pool. */
+ struct cudaMemLocation location; /**< Location allocations should reside. */
+ /**
+ * Windows-specific LPSECURITYATTRIBUTES required when
+ * ::cudaMemHandleTypeWin32 is specified. This security attribute defines
+ * the scope of which exported allocations may be transferred to other
+ * processes. In all other cases, this field is required to be zero.
+ */
+ void *win32SecurityAttributes;
+ unsigned char reserved[64]; /**< reserved for future use, must be 0 */
+};
+
+/**
+ * Opaque data for exporting a pool allocation
+ */
+struct __device_builtin__ cudaMemPoolPtrExportData {
+ unsigned char reserved[64];
+};
+
+/**
+ * Memory allocation node parameters
+ */
+struct __device_builtin__ cudaMemAllocNodeParams {
+ /**
+ * in: location where the allocation should reside (specified in ::location).
+ * ::handleTypes must be ::cudaMemHandleTypeNone. IPC is not supported.
+ */
+ struct cudaMemPoolProps poolProps;
+ const struct cudaMemAccessDesc *accessDescs; /**< in: array of memory access descriptors. Used to describe peer GPU access */
+ size_t accessDescCount; /**< in: number of memory access descriptors. Must not exceed the number of GPUs. */
+ size_t bytesize; /**< in: size in bytes of the requested allocation */
+ void *dptr; /**< out: address of the allocation returned by CUDA */
+};
+
+/**
+ * Graph memory attributes
+ */
+enum __device_builtin__ cudaGraphMemAttributeType {
+ /**
+ * (value type = cuuint64_t)
+ * Amount of memory, in bytes, currently associated with graphs.
+ */
+ cudaGraphMemAttrUsedMemCurrent = 0x0,
+
+ /**
+ * (value type = cuuint64_t)
+ * High watermark of memory, in bytes, associated with graphs since the
+ * last time it was reset. High watermark can only be reset to zero.
+ */
+ cudaGraphMemAttrUsedMemHigh = 0x1,
+
+ /**
+ * (value type = cuuint64_t)
+ * Amount of memory, in bytes, currently allocated for use by
+ * the CUDA graphs asynchronous allocator.
+ */
+ cudaGraphMemAttrReservedMemCurrent = 0x2,
+
+ /**
+ * (value type = cuuint64_t)
+ * High watermark of memory, in bytes, currently allocated for use by
+ * the CUDA graphs asynchronous allocator.
+ */
+ cudaGraphMemAttrReservedMemHigh = 0x3
+};
+
+/**
+ * CUDA device P2P attributes
+ */
+
+enum __device_builtin__ cudaDeviceP2PAttr {
+ cudaDevP2PAttrPerformanceRank = 1, /**< A relative value indicating the performance of the link between two devices */
+ cudaDevP2PAttrAccessSupported = 2, /**< Peer access is enabled */
+ cudaDevP2PAttrNativeAtomicSupported = 3, /**< Native atomic operation over the link supported */
+ cudaDevP2PAttrCudaArrayAccessSupported = 4 /**< Accessing CUDA arrays over the link supported */
+};
+
+/**
+ * CUDA UUID types
+ */
+#ifndef CU_UUID_HAS_BEEN_DEFINED
+#define CU_UUID_HAS_BEEN_DEFINED
+struct __device_builtin__ CUuuid_st { /**< CUDA definition of UUID */
+ char bytes[16];
+};
+typedef __device_builtin__ struct CUuuid_st CUuuid;
+#endif
+typedef __device_builtin__ struct CUuuid_st cudaUUID_t;
+
+/**
+ * CUDA device properties
+ */
+struct __device_builtin__ cudaDeviceProp
+{
+ char name[256]; /**< ASCII string identifying device */
+ cudaUUID_t uuid; /**< 16-byte unique identifier */
+ char luid[8]; /**< 8-byte locally unique identifier. Value is undefined on TCC and non-Windows platforms */
+ unsigned int luidDeviceNodeMask; /**< LUID device node mask.
Value is undefined on TCC and non-Windows platforms */ + size_t totalGlobalMem; /**< Global memory available on device in bytes */ + size_t sharedMemPerBlock; /**< Shared memory available per block in bytes */ + int regsPerBlock; /**< 32-bit registers available per block */ + int warpSize; /**< Warp size in threads */ + size_t memPitch; /**< Maximum pitch in bytes allowed by memory copies */ + int maxThreadsPerBlock; /**< Maximum number of threads per block */ + int maxThreadsDim[3]; /**< Maximum size of each dimension of a block */ + int maxGridSize[3]; /**< Maximum size of each dimension of a grid */ + int clockRate; /**< Deprecated, Clock frequency in kilohertz */ + size_t totalConstMem; /**< Constant memory available on device in bytes */ + int major; /**< Major compute capability */ + int minor; /**< Minor compute capability */ + size_t textureAlignment; /**< Alignment requirement for textures */ + size_t texturePitchAlignment; /**< Pitch alignment requirement for texture references bound to pitched memory */ + int deviceOverlap; /**< Device can concurrently copy memory and execute a kernel. Deprecated. Use instead asyncEngineCount. */ + int multiProcessorCount; /**< Number of multiprocessors on device */ + int kernelExecTimeoutEnabled; /**< Deprecated, Specified whether there is a run time limit on kernels */ + int integrated; /**< Device is integrated as opposed to discrete */ + int canMapHostMemory; /**< Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer */ + int computeMode; /**< Deprecated, Compute mode (See ::cudaComputeMode) */ + int maxTexture1D; /**< Maximum 1D texture size */ + int maxTexture1DMipmap; /**< Maximum 1D mipmapped texture size */ + int maxTexture1DLinear; /**< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead. 
*/ + int maxTexture2D[2]; /**< Maximum 2D texture dimensions */ + int maxTexture2DMipmap[2]; /**< Maximum 2D mipmapped texture dimensions */ + int maxTexture2DLinear[3]; /**< Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory */ + int maxTexture2DGather[2]; /**< Maximum 2D texture dimensions if texture gather operations have to be performed */ + int maxTexture3D[3]; /**< Maximum 3D texture dimensions */ + int maxTexture3DAlt[3]; /**< Maximum alternate 3D texture dimensions */ + int maxTextureCubemap; /**< Maximum Cubemap texture dimensions */ + int maxTexture1DLayered[2]; /**< Maximum 1D layered texture dimensions */ + int maxTexture2DLayered[3]; /**< Maximum 2D layered texture dimensions */ + int maxTextureCubemapLayered[2];/**< Maximum Cubemap layered texture dimensions */ + int maxSurface1D; /**< Maximum 1D surface size */ + int maxSurface2D[2]; /**< Maximum 2D surface dimensions */ + int maxSurface3D[3]; /**< Maximum 3D surface dimensions */ + int maxSurface1DLayered[2]; /**< Maximum 1D layered surface dimensions */ + int maxSurface2DLayered[3]; /**< Maximum 2D layered surface dimensions */ + int maxSurfaceCubemap; /**< Maximum Cubemap surface dimensions */ + int maxSurfaceCubemapLayered[2];/**< Maximum Cubemap layered surface dimensions */ + size_t surfaceAlignment; /**< Alignment requirements for surfaces */ + int concurrentKernels; /**< Device can possibly execute multiple kernels concurrently */ + int ECCEnabled; /**< Device has ECC support enabled */ + int pciBusID; /**< PCI bus ID of the device */ + int pciDeviceID; /**< PCI device ID of the device */ + int pciDomainID; /**< PCI domain ID of the device */ + int tccDriver; /**< 1 if device is a Tesla device using TCC driver, 0 otherwise */ + int asyncEngineCount; /**< Number of asynchronous engines */ + int unifiedAddressing; /**< Device shares a unified address space with the host */ + int memoryClockRate; /**< Deprecated, Peak memory clock frequency in kilohertz */ + int memoryBusWidth; /**< Global memory bus width in bits */ + int l2CacheSize; /**< Size of L2 cache in bytes */ + int persistingL2CacheMaxSize; /**< Device's maximum l2 persisting lines capacity setting in bytes */ + int maxThreadsPerMultiProcessor;/**< Maximum resident threads per multiprocessor */ + int streamPrioritiesSupported; /**< Device supports stream priorities */ + int globalL1CacheSupported; /**< Device supports caching globals in L1 */ + int localL1CacheSupported; /**< Device supports caching locals in L1 */ + size_t sharedMemPerMultiprocessor; /**< Shared memory available per multiprocessor in bytes */ + int regsPerMultiprocessor; /**< 32-bit registers available per multiprocessor */ + int managedMemory; /**< Device supports allocating managed memory on this system */ + int isMultiGpuBoard; /**< Device is on a multi-GPU board */ + int multiGpuBoardGroupID; /**< Unique identifier for a group of devices on the same multi-GPU board */ + int hostNativeAtomicSupported; /**< Link between the device and the host supports native atomic operations */ + int singleToDoublePrecisionPerfRatio; /**< Deprecated, Ratio of single precision performance (in floating-point operations per second) to double precision performance */ + int pageableMemoryAccess; /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */ + int concurrentManagedAccess; /**< Device can coherently access managed memory concurrently with the CPU */ + int computePreemptionSupported; /**< Device supports Compute Preemption */ + 
int canUseHostPointerForRegisteredMem; /**< Device can access host registered memory at the same virtual address as the CPU */ + int cooperativeLaunch; /**< Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel */ + int cooperativeMultiDeviceLaunch; /**< Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. */ + size_t sharedMemPerBlockOptin; /**< Per device maximum shared memory per block usable by special opt in */ + int pageableMemoryAccessUsesHostPageTables; /**< Device accesses pageable memory via the host's page tables */ + int directManagedMemAccessFromHost; /**< Host can directly access managed memory on the device without migration. */ + int maxBlocksPerMultiProcessor; /**< Maximum number of resident blocks per multiprocessor */ + int accessPolicyMaxWindowSize; /**< The maximum value of ::cudaAccessPolicyWindow::num_bytes. */ + size_t reservedSharedMemPerBlock; /**< Shared memory reserved by CUDA driver per block in bytes */ + int hostRegisterSupported; /**< Device supports host memory registration via ::cudaHostRegister. */ + int sparseCudaArraySupported; /**< 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise */ + int hostRegisterReadOnlySupported; /**< Device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU */ + int timelineSemaphoreInteropSupported; /**< External timeline semaphore interop is supported on the device */ + int memoryPoolsSupported; /**< 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise */ + int gpuDirectRDMASupported; /**< 1 if the device supports GPUDirect RDMA APIs, 0 otherwise */ + unsigned int gpuDirectRDMAFlushWritesOptions; /**< Bitmask to be interpreted according to the ::cudaFlushGPUDirectRDMAWritesOptions enum */ + int gpuDirectRDMAWritesOrdering;/**< See the ::cudaGPUDirectRDMAWritesOrdering enum for numerical values */ + unsigned int memoryPoolSupportedHandleTypes; /**< Bitmask of handle types supported with mempool-based IPC */ + int deferredMappingCudaArraySupported; /**< 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays */ + int ipcEventSupported; /**< Device supports IPC Events. 
*/ + int clusterLaunch; /**< Indicates device supports cluster launch */ + int unifiedFunctionPointers; /**< Indicates device supports unified pointers */ + int reserved2[2]; + int reserved[61]; /**< Reserved for future use */ +}; + +/** + * CUDA IPC Handle Size + */ +#define CUDA_IPC_HANDLE_SIZE 64 + +/** + * CUDA IPC event handle + */ +typedef __device_builtin__ struct __device_builtin__ cudaIpcEventHandle_st +{ + char reserved[CUDA_IPC_HANDLE_SIZE]; +}cudaIpcEventHandle_t; + +/** + * CUDA IPC memory handle + */ +typedef __device_builtin__ struct __device_builtin__ cudaIpcMemHandle_st +{ + char reserved[CUDA_IPC_HANDLE_SIZE]; +}cudaIpcMemHandle_t; + +/** + * External memory handle types + */ +enum __device_builtin__ cudaExternalMemoryHandleType { + /** + * Handle is an opaque file descriptor + */ + cudaExternalMemoryHandleTypeOpaqueFd = 1, + /** + * Handle is an opaque shared NT handle + */ + cudaExternalMemoryHandleTypeOpaqueWin32 = 2, + /** + * Handle is an opaque, globally shared handle + */ + cudaExternalMemoryHandleTypeOpaqueWin32Kmt = 3, + /** + * Handle is a D3D12 heap object + */ + cudaExternalMemoryHandleTypeD3D12Heap = 4, + /** + * Handle is a D3D12 committed resource + */ + cudaExternalMemoryHandleTypeD3D12Resource = 5, + /** + * Handle is a shared NT handle to a D3D11 resource + */ + cudaExternalMemoryHandleTypeD3D11Resource = 6, + /** + * Handle is a globally shared handle to a D3D11 resource + */ + cudaExternalMemoryHandleTypeD3D11ResourceKmt = 7, + /** + * Handle is an NvSciBuf object + */ + cudaExternalMemoryHandleTypeNvSciBuf = 8 +}; + +/** + * Indicates that the external memory object is a dedicated resource + */ +#define cudaExternalMemoryDedicated 0x1 + +/** When the /p flags parameter of ::cudaExternalSemaphoreSignalParams + * contains this flag, it indicates that signaling an external semaphore object + * should skip performing appropriate memory synchronization operations over all + * the external memory objects that are imported as ::cudaExternalMemoryHandleTypeNvSciBuf, + * which otherwise are performed by default to ensure data coherency with other + * importers of the same NvSciBuf memory objects. + */ +#define cudaExternalSemaphoreSignalSkipNvSciBufMemSync 0x01 + +/** When the /p flags parameter of ::cudaExternalSemaphoreWaitParams + * contains this flag, it indicates that waiting an external semaphore object + * should skip performing appropriate memory synchronization operations over all + * the external memory objects that are imported as ::cudaExternalMemoryHandleTypeNvSciBuf, + * which otherwise are performed by default to ensure data coherency with other + * importers of the same NvSciBuf memory objects. + */ +#define cudaExternalSemaphoreWaitSkipNvSciBufMemSync 0x02 + +/** + * When /p flags of ::cudaDeviceGetNvSciSyncAttributes is set to this, + * it indicates that application need signaler specific NvSciSyncAttr + * to be filled by ::cudaDeviceGetNvSciSyncAttributes. + */ +#define cudaNvSciSyncAttrSignal 0x1 + +/** + * When /p flags of ::cudaDeviceGetNvSciSyncAttributes is set to this, + * it indicates that application need waiter specific NvSciSyncAttr + * to be filled by ::cudaDeviceGetNvSciSyncAttributes. + */ +#define cudaNvSciSyncAttrWait 0x2 + +/** + * External memory handle descriptor + */ +struct __device_builtin__ cudaExternalMemoryHandleDesc { + /** + * Type of the handle + */ + enum cudaExternalMemoryHandleType type; + union { + /** + * File descriptor referencing the memory object. 
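The ::cudaDeviceProp structure completed above is the record filled in by cudaGetDeviceProperties(). A small host-side sketch (illustrative only, error checking omitted) that reports a few of the fields declared above:

#include <cuda_runtime.h>
#include <stdio.h>

int main(void)
{
    int count = 0;
    cudaGetDeviceCount(&count);
    for (int dev = 0; dev < count; ++dev) {
        struct cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("device %d: %s, cc %d.%d, %zu MiB global, %d SMs\n",
               dev, prop.name, prop.major, prop.minor,
               prop.totalGlobalMem >> 20, prop.multiProcessorCount);
        printf("  memory pools: %d, cluster launch: %d, max threads/block: %d\n",
               prop.memoryPoolsSupported, prop.clusterLaunch, prop.maxThreadsPerBlock);
    }
    return 0;
}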
Valid + * when type is + * ::cudaExternalMemoryHandleTypeOpaqueFd + */ + int fd; + /** + * Win32 handle referencing the semaphore object. Valid when + * type is one of the following: + * - ::cudaExternalMemoryHandleTypeOpaqueWin32 + * - ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt + * - ::cudaExternalMemoryHandleTypeD3D12Heap + * - ::cudaExternalMemoryHandleTypeD3D12Resource + * - ::cudaExternalMemoryHandleTypeD3D11Resource + * - ::cudaExternalMemoryHandleTypeD3D11ResourceKmt + * Exactly one of 'handle' and 'name' must be non-NULL. If + * type is one of the following: + * ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt + * ::cudaExternalMemoryHandleTypeD3D11ResourceKmt + * then 'name' must be NULL. + */ + struct { + /** + * Valid NT handle. Must be NULL if 'name' is non-NULL + */ + void *handle; + /** + * Name of a valid memory object. + * Must be NULL if 'handle' is non-NULL. + */ + const void *name; + } win32; + /** + * A handle representing NvSciBuf Object. Valid when type + * is ::cudaExternalMemoryHandleTypeNvSciBuf + */ + const void *nvSciBufObject; + } handle; + /** + * Size of the memory allocation + */ + unsigned long long size; + /** + * Flags must either be zero or ::cudaExternalMemoryDedicated + */ + unsigned int flags; +}; + +/** + * External memory buffer descriptor + */ +struct __device_builtin__ cudaExternalMemoryBufferDesc { + /** + * Offset into the memory object where the buffer's base is + */ + unsigned long long offset; + /** + * Size of the buffer + */ + unsigned long long size; + /** + * Flags reserved for future use. Must be zero. + */ + unsigned int flags; +}; + +/** + * External memory mipmap descriptor + */ +struct __device_builtin__ cudaExternalMemoryMipmappedArrayDesc { + /** + * Offset into the memory object where the base level of the + * mipmap chain is. + */ + unsigned long long offset; + /** + * Format of base level of the mipmap chain + */ + struct cudaChannelFormatDesc formatDesc; + /** + * Dimensions of base level of the mipmap chain + */ + struct cudaExtent extent; + /** + * Flags associated with CUDA mipmapped arrays. 
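::cudaExternalMemoryHandleDesc and ::cudaExternalMemoryBufferDesc above are used with cudaImportExternalMemory() and cudaExternalMemoryGetMappedBuffer(). A sketch for the opaque-file-descriptor case; the fd and size are assumed to come from the exporting API (for example a Vulkan exporter), the helper name is illustrative, and error handling is omitted:

#include <cuda_runtime.h>
#include <string.h>

static void *import_fd_buffer(int fd, unsigned long long size,
                              cudaExternalMemory_t *extMemOut)
{
    struct cudaExternalMemoryHandleDesc hdesc;
    memset(&hdesc, 0, sizeof(hdesc));
    hdesc.type      = cudaExternalMemoryHandleTypeOpaqueFd;
    hdesc.handle.fd = fd;        /* the driver takes ownership of the fd on success */
    hdesc.size      = size;
    hdesc.flags     = 0;         /* or cudaExternalMemoryDedicated */

    cudaImportExternalMemory(extMemOut, &hdesc);

    struct cudaExternalMemoryBufferDesc bdesc;
    memset(&bdesc, 0, sizeof(bdesc));
    bdesc.offset = 0;
    bdesc.size   = size;
    bdesc.flags  = 0;            /* reserved, must be zero */

    void *devPtr = NULL;
    cudaExternalMemoryGetMappedBuffer(&devPtr, *extMemOut, &bdesc);
    return devPtr;  /* cudaFree() the pointer and cudaDestroyExternalMemory() when done */
}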
+ * See ::cudaMallocMipmappedArray + */ + unsigned int flags; + /** + * Total number of levels in the mipmap chain + */ + unsigned int numLevels; +}; + +/** + * External semaphore handle types + */ +enum __device_builtin__ cudaExternalSemaphoreHandleType { + /** + * Handle is an opaque file descriptor + */ + cudaExternalSemaphoreHandleTypeOpaqueFd = 1, + /** + * Handle is an opaque shared NT handle + */ + cudaExternalSemaphoreHandleTypeOpaqueWin32 = 2, + /** + * Handle is an opaque, globally shared handle + */ + cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = 3, + /** + * Handle is a shared NT handle referencing a D3D12 fence object + */ + cudaExternalSemaphoreHandleTypeD3D12Fence = 4, + /** + * Handle is a shared NT handle referencing a D3D11 fence object + */ + cudaExternalSemaphoreHandleTypeD3D11Fence = 5, + /** + * Opaque handle to NvSciSync Object + */ + cudaExternalSemaphoreHandleTypeNvSciSync = 6, + /** + * Handle is a shared NT handle referencing a D3D11 keyed mutex object + */ + cudaExternalSemaphoreHandleTypeKeyedMutex = 7, + /** + * Handle is a shared KMT handle referencing a D3D11 keyed mutex object + */ + cudaExternalSemaphoreHandleTypeKeyedMutexKmt = 8, + /** + * Handle is an opaque handle file descriptor referencing a timeline semaphore + */ + cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = 9, + /** + * Handle is an opaque handle file descriptor referencing a timeline semaphore + */ + cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = 10 +}; + +/** + * External semaphore handle descriptor + */ +struct __device_builtin__ cudaExternalSemaphoreHandleDesc { + /** + * Type of the handle + */ + enum cudaExternalSemaphoreHandleType type; + union { + /** + * File descriptor referencing the semaphore object. Valid when + * type is one of the following: + * - ::cudaExternalSemaphoreHandleTypeOpaqueFd + * - ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd + */ + int fd; + /** + * Win32 handle referencing the semaphore object. Valid when + * type is one of the following: + * - ::cudaExternalSemaphoreHandleTypeOpaqueWin32 + * - ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + * - ::cudaExternalSemaphoreHandleTypeD3D12Fence + * - ::cudaExternalSemaphoreHandleTypeD3D11Fence + * - ::cudaExternalSemaphoreHandleTypeKeyedMutex + * - ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + * Exactly one of 'handle' and 'name' must be non-NULL. If + * type is one of the following: + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt + * then 'name' must be NULL. + */ + struct { + /** + * Valid NT handle. Must be NULL if 'name' is non-NULL + */ + void *handle; + /** + * Name of a valid synchronization primitive. + * Must be NULL if 'handle' is non-NULL. + */ + const void *name; + } win32; + /** + * Valid NvSciSyncObj. Must be non NULL + */ + const void* nvSciSyncObj; + } handle; + /** + * Flags reserved for the future. Must be zero. + */ + unsigned int flags; +}; + +/** + * External semaphore signal parameters(deprecated) + */ +struct __device_builtin__ cudaExternalSemaphoreSignalParams_v1 { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be signaled + */ + unsigned long long value; + } fence; + union { + /** + * Pointer to NvSciSyncFence. Valid if ::cudaExternalSemaphoreHandleType + * is of type ::cudaExternalSemaphoreHandleTypeNvSciSync. 
+ */ + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /* + * Value of key to release the mutex with + */ + unsigned long long key; + } keyedMutex; + } params; + /** + * Only when ::cudaExternalSemaphoreSignalParams is used to + * signal a ::cudaExternalSemaphore_t of type + * ::cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is + * ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync: which indicates + * that while signaling the ::cudaExternalSemaphore_t, no memory + * synchronization operations should be performed for any external memory + * object imported as ::cudaExternalMemoryHandleTypeNvSciBuf. + * For all other types of ::cudaExternalSemaphore_t, flags must be zero. + */ + unsigned int flags; +}; + +/** +* External semaphore wait parameters(deprecated) +*/ +struct __device_builtin__ cudaExternalSemaphoreWaitParams_v1 { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be waited on + */ + unsigned long long value; + } fence; + union { + /** + * Pointer to NvSciSyncFence. Valid if ::cudaExternalSemaphoreHandleType + * is of type ::cudaExternalSemaphoreHandleTypeNvSciSync. + */ + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /** + * Value of key to acquire the mutex with + */ + unsigned long long key; + /** + * Timeout in milliseconds to wait to acquire the mutex + */ + unsigned int timeoutMs; + } keyedMutex; + } params; + /** + * Only when ::cudaExternalSemaphoreSignalParams is used to + * signal a ::cudaExternalSemaphore_t of type + * ::cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is + * ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync: which indicates + * that while waiting for the ::cudaExternalSemaphore_t, no memory + * synchronization operations should be performed for any external memory + * object imported as ::cudaExternalMemoryHandleTypeNvSciBuf. + * For all other types of ::cudaExternalSemaphore_t, flags must be zero. + */ + unsigned int flags; +}; + +/** + * External semaphore signal parameters, compatible with driver type + */ +struct __device_builtin__ cudaExternalSemaphoreSignalParams{ + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be signaled + */ + unsigned long long value; + } fence; + union { + /** + * Pointer to NvSciSyncFence. Valid if ::cudaExternalSemaphoreHandleType + * is of type ::cudaExternalSemaphoreHandleTypeNvSciSync. + */ + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /* + * Value of key to release the mutex with + */ + unsigned long long key; + } keyedMutex; + unsigned int reserved[12]; + } params; + /** + * Only when ::cudaExternalSemaphoreSignalParams is used to + * signal a ::cudaExternalSemaphore_t of type + * ::cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is + * ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync: which indicates + * that while signaling the ::cudaExternalSemaphore_t, no memory + * synchronization operations should be performed for any external memory + * object imported as ::cudaExternalMemoryHandleTypeNvSciBuf. + * For all other types of ::cudaExternalSemaphore_t, flags must be zero. 
+ */ + unsigned int flags; + unsigned int reserved[16]; +}; + +/** + * External semaphore wait parameters, compatible with driver type + */ +struct __device_builtin__ cudaExternalSemaphoreWaitParams { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be waited on + */ + unsigned long long value; + } fence; + union { + /** + * Pointer to NvSciSyncFence. Valid if ::cudaExternalSemaphoreHandleType + * is of type ::cudaExternalSemaphoreHandleTypeNvSciSync. + */ + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /** + * Value of key to acquire the mutex with + */ + unsigned long long key; + /** + * Timeout in milliseconds to wait to acquire the mutex + */ + unsigned int timeoutMs; + } keyedMutex; + unsigned int reserved[10]; + } params; + /** + * Only when ::cudaExternalSemaphoreSignalParams is used to + * signal a ::cudaExternalSemaphore_t of type + * ::cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is + * ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync: which indicates + * that while waiting for the ::cudaExternalSemaphore_t, no memory + * synchronization operations should be performed for any external memory + * object imported as ::cudaExternalMemoryHandleTypeNvSciBuf. + * For all other types of ::cudaExternalSemaphore_t, flags must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +}; + +/******************************************************************************* +* * +* SHORTHAND TYPE DEFINITION USED BY RUNTIME API * +* * +*******************************************************************************/ + +/** + * CUDA Error types + */ +typedef __device_builtin__ enum cudaError cudaError_t; + +/** + * CUDA stream + */ +typedef __device_builtin__ struct CUstream_st *cudaStream_t; + +/** + * CUDA event types + */ +typedef __device_builtin__ struct CUevent_st *cudaEvent_t; + +/** + * CUDA graphics resource types + */ +typedef __device_builtin__ struct cudaGraphicsResource *cudaGraphicsResource_t; + +/** + * CUDA external memory + */ +typedef __device_builtin__ struct CUexternalMemory_st *cudaExternalMemory_t; + +/** + * CUDA external semaphore + */ +typedef __device_builtin__ struct CUexternalSemaphore_st *cudaExternalSemaphore_t; + +/** + * CUDA graph + */ +typedef __device_builtin__ struct CUgraph_st *cudaGraph_t; + +/** + * CUDA graph node. 
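The ::cudaExternalSemaphoreSignalParams / ::cudaExternalSemaphoreWaitParams structures above are passed to cudaSignalExternalSemaphoresAsync() and cudaWaitExternalSemaphoresAsync(). A sketch for a timeline-semaphore file descriptor; the fd is assumed to come from the exporting API, the target value of 1 is arbitrary, and error checking is omitted:

#include <cuda_runtime.h>
#include <string.h>

static void signal_then_wait(int timelineFd, cudaStream_t stream)
{
    struct cudaExternalSemaphoreHandleDesc hdesc;
    memset(&hdesc, 0, sizeof(hdesc));
    hdesc.type      = cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd;
    hdesc.handle.fd = timelineFd;

    cudaExternalSemaphore_t sem;
    cudaImportExternalSemaphore(&sem, &hdesc);

    struct cudaExternalSemaphoreSignalParams sig;
    memset(&sig, 0, sizeof(sig));
    sig.params.fence.value = 1;     /* timeline value to signal */
    cudaSignalExternalSemaphoresAsync(&sem, &sig, 1, stream);

    struct cudaExternalSemaphoreWaitParams wait;
    memset(&wait, 0, sizeof(wait));
    wait.params.fence.value = 1;    /* wait until the timeline reaches 1 */
    cudaWaitExternalSemaphoresAsync(&sem, &wait, 1, stream);

    cudaStreamSynchronize(stream);
    cudaDestroyExternalSemaphore(sem);
}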
+ */ +typedef __device_builtin__ struct CUgraphNode_st *cudaGraphNode_t; + +/** + * CUDA user object for graphs + */ +typedef __device_builtin__ struct CUuserObject_st *cudaUserObject_t; + +/** + * CUDA function + */ +typedef __device_builtin__ struct CUfunc_st *cudaFunction_t; + +/** + * CUDA kernel + */ +typedef __device_builtin__ struct CUkern_st *cudaKernel_t; + +/** + * CUDA memory pool + */ +typedef __device_builtin__ struct CUmemPoolHandle_st *cudaMemPool_t; + +/** + * CUDA cooperative group scope + */ +enum __device_builtin__ cudaCGScope { + cudaCGScopeInvalid = 0, /**< Invalid cooperative group scope */ + cudaCGScopeGrid = 1, /**< Scope represented by a grid_group */ + cudaCGScopeMultiGrid = 2 /**< Scope represented by a multi_grid_group */ +}; + +/** + * CUDA launch parameters + */ +struct __device_builtin__ cudaLaunchParams +{ + void *func; /**< Device function symbol */ + dim3 gridDim; /**< Grid dimentions */ + dim3 blockDim; /**< Block dimentions */ + void **args; /**< Arguments */ + size_t sharedMem; /**< Shared memory */ + cudaStream_t stream; /**< Stream identifier */ +}; + +/** + * CUDA GPU kernel node parameters + */ +struct __device_builtin__ cudaKernelNodeParams { + void* func; /**< Kernel to launch */ + dim3 gridDim; /**< Grid dimensions */ + dim3 blockDim; /**< Block dimensions */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + void **kernelParams; /**< Array of pointers to individual kernel arguments*/ + void **extra; /**< Pointer to kernel arguments in the "extra" format */ +}; + +/** + * External semaphore signal node parameters + */ +struct __device_builtin__ cudaExternalSemaphoreSignalNodeParams { + cudaExternalSemaphore_t* extSemArray; /**< Array of external semaphore handles. */ + const struct cudaExternalSemaphoreSignalParams* paramsArray; /**< Array of external semaphore signal parameters. */ + unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. */ +}; + +/** + * External semaphore wait node parameters + */ +struct __device_builtin__ cudaExternalSemaphoreWaitNodeParams { + cudaExternalSemaphore_t* extSemArray; /**< Array of external semaphore handles. */ + const struct cudaExternalSemaphoreWaitParams* paramsArray; /**< Array of external semaphore wait parameters. */ + unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. 
*/ +}; + +/** +* CUDA Graph node types +*/ +enum __device_builtin__ cudaGraphNodeType { + cudaGraphNodeTypeKernel = 0x00, /**< GPU kernel node */ + cudaGraphNodeTypeMemcpy = 0x01, /**< Memcpy node */ + cudaGraphNodeTypeMemset = 0x02, /**< Memset node */ + cudaGraphNodeTypeHost = 0x03, /**< Host (executable) node */ + cudaGraphNodeTypeGraph = 0x04, /**< Node which executes an embedded graph */ + cudaGraphNodeTypeEmpty = 0x05, /**< Empty (no-op) node */ + cudaGraphNodeTypeWaitEvent = 0x06, /**< External event wait node */ + cudaGraphNodeTypeEventRecord = 0x07, /**< External event record node */ + cudaGraphNodeTypeExtSemaphoreSignal = 0x08, /**< External semaphore signal node */ + cudaGraphNodeTypeExtSemaphoreWait = 0x09, /**< External semaphore wait node */ + cudaGraphNodeTypeMemAlloc = 0x0a, /**< Memory allocation node */ + cudaGraphNodeTypeMemFree = 0x0b, /**< Memory free node */ + cudaGraphNodeTypeCount +}; + +/** + * CUDA executable (launchable) graph + */ +typedef struct CUgraphExec_st* cudaGraphExec_t; + +/** +* CUDA Graph Update error types +*/ +enum __device_builtin__ cudaGraphExecUpdateResult { + cudaGraphExecUpdateSuccess = 0x0, /**< The update succeeded */ + cudaGraphExecUpdateError = 0x1, /**< The update failed for an unexpected reason which is described in the return value of the function */ + cudaGraphExecUpdateErrorTopologyChanged = 0x2, /**< The update failed because the topology changed */ + cudaGraphExecUpdateErrorNodeTypeChanged = 0x3, /**< The update failed because a node type changed */ + cudaGraphExecUpdateErrorFunctionChanged = 0x4, /**< The update failed because the function of a kernel node changed (CUDA driver < 11.2) */ + cudaGraphExecUpdateErrorParametersChanged = 0x5, /**< The update failed because the parameters changed in a way that is not supported */ + cudaGraphExecUpdateErrorNotSupported = 0x6, /**< The update failed because something about the node is not supported */ + cudaGraphExecUpdateErrorUnsupportedFunctionChange = 0x7, /**< The update failed because the function of a kernel node changed in an unsupported way */ + cudaGraphExecUpdateErrorAttributesChanged = 0x8 /**< The update failed because the node attributes changed in a way that is not supported */ +}; + +/** + * Graph instantiation results +*/ +typedef __device_builtin__ enum cudaGraphInstantiateResult { + cudaGraphInstantiateSuccess = 0, /**< Instantiation succeeded */ + cudaGraphInstantiateError = 1, /**< Instantiation failed for an unexpected reason which is described in the return value of the function */ + cudaGraphInstantiateInvalidStructure = 2, /**< Instantiation failed due to invalid structure, such as cycles */ + cudaGraphInstantiateNodeOperationNotSupported = 3, /**< Instantiation for device launch failed because the graph contained an unsupported operation */ + cudaGraphInstantiateMultipleDevicesNotSupported = 4 /**< Instantiation for device launch failed due to the nodes belonging to different contexts */ +} cudaGraphInstantiateResult; + +/** + * Graph instantiation parameters + */ +typedef __device_builtin__ struct cudaGraphInstantiateParams_st +{ + unsigned long long flags; /**< Instantiation flags */ + cudaStream_t uploadStream; /**< Upload stream */ + cudaGraphNode_t errNode_out; /**< The node which caused instantiation to fail, if any */ + cudaGraphInstantiateResult result_out; /**< Whether instantiation was successful. 
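::cudaKernelNodeParams together with the graph node and instantiation types declared above drive explicit graph construction. A minimal sketch that builds a one-node graph around a hypothetical scale kernel, instantiates it with cudaGraphInstantiateWithFlags(), and launches it; names and launch geometry are illustrative, error checking omitted:

#include <cuda_runtime.h>

__global__ void scale(float *x, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

static void run_graph(float *d_x, int n, cudaStream_t stream)
{
    float s = 2.0f;
    void *args[] = { &d_x, &s, &n };   /* addresses of the kernel arguments */

    struct cudaKernelNodeParams kp = {};
    kp.func           = (void *)scale;
    kp.gridDim        = dim3((n + 255) / 256);
    kp.blockDim       = dim3(256);
    kp.sharedMemBytes = 0;
    kp.kernelParams   = args;
    kp.extra          = NULL;

    cudaGraph_t graph;
    cudaGraphCreate(&graph, 0);

    cudaGraphNode_t node;
    cudaGraphAddKernelNode(&node, graph, NULL, 0, &kp);  /* no dependencies */

    cudaGraphExec_t exec;
    cudaGraphInstantiateWithFlags(&exec, graph, 0);
    cudaGraphLaunch(exec, stream);
    cudaStreamSynchronize(stream);

    cudaGraphExecDestroy(exec);
    cudaGraphDestroy(graph);
}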
If it failed, the reason why */ +} cudaGraphInstantiateParams; + +/** + * Result information returned by cudaGraphExecUpdate + */ +typedef __device_builtin__ struct cudaGraphExecUpdateResultInfo_st { + /** + * Gives more specific detail when a cuda graph update fails. + */ + enum cudaGraphExecUpdateResult result; + + /** + * The "to node" of the error edge when the topologies do not match. + * The error node when the error is associated with a specific node. + * NULL when the error is generic. + */ + cudaGraphNode_t errorNode; + + /** + * The from node of error edge when the topologies do not match. Otherwise NULL. + */ + cudaGraphNode_t errorFromNode; +} cudaGraphExecUpdateResultInfo; + +/** + * Flags to specify search options to be used with ::cudaGetDriverEntryPoint + * For more details see ::cuGetProcAddress + */ +enum __device_builtin__ cudaGetDriverEntryPointFlags { + cudaEnableDefault = 0x0, /**< Default search mode for driver symbols. */ + cudaEnableLegacyStream = 0x1, /**< Search for legacy versions of driver symbols. */ + cudaEnablePerThreadDefaultStream = 0x2 /**< Search for per-thread versions of driver symbols. */ +}; + +/** + * Enum for status from obtaining driver entry points, used with ::cudaApiGetDriverEntryPoint + */ +enum __device_builtin__ cudaDriverEntryPointQueryResult { + cudaDriverEntryPointSuccess = 0, /**< Search for symbol found a match */ + cudaDriverEntryPointSymbolNotFound = 1, /**< Search for symbol was not found */ + cudaDriverEntryPointVersionNotSufficent = 2 /**< Search for symbol was found but version wasn't great enough */ +}; + +/** + * CUDA Graph debug write options + */ +enum __device_builtin__ cudaGraphDebugDotFlags { + cudaGraphDebugDotFlagsVerbose = 1<<0, /**< Output all debug data as if every debug flag is enabled */ + cudaGraphDebugDotFlagsKernelNodeParams = 1<<2, /**< Adds cudaKernelNodeParams to output */ + cudaGraphDebugDotFlagsMemcpyNodeParams = 1<<3, /**< Adds cudaMemcpy3DParms to output */ + cudaGraphDebugDotFlagsMemsetNodeParams = 1<<4, /**< Adds cudaMemsetParams to output */ + cudaGraphDebugDotFlagsHostNodeParams = 1<<5, /**< Adds cudaHostNodeParams to output */ + cudaGraphDebugDotFlagsEventNodeParams = 1<<6, /**< Adds cudaEvent_t handle from record and wait nodes to output */ + cudaGraphDebugDotFlagsExtSemasSignalNodeParams = 1<<7, /**< Adds cudaExternalSemaphoreSignalNodeParams values to output */ + cudaGraphDebugDotFlagsExtSemasWaitNodeParams = 1<<8, /**< Adds cudaExternalSemaphoreWaitNodeParams to output */ + cudaGraphDebugDotFlagsKernelNodeAttributes = 1<<9, /**< Adds cudaKernelNodeAttrID values to output */ + cudaGraphDebugDotFlagsHandles = 1<<10 /**< Adds node handles and every kernel function handle to output */ +}; + +/** + * Flags for instantiating a graph + */ +enum __device_builtin__ cudaGraphInstantiateFlags { + cudaGraphInstantiateFlagAutoFreeOnLaunch = 1 /**< Automatically free memory allocated in a graph before relaunching. */ + , cudaGraphInstantiateFlagUpload = 2 /**< Automatically upload the graph after instantiaton. */ + , cudaGraphInstantiateFlagDeviceLaunch = 4 /**< Instantiate the graph to be launchable from the device. */ + , cudaGraphInstantiateFlagUseNodePriority = 8 /**< Run the graph using the per-node priority attributes rather than the + priority of the stream it is launched into. 
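The ::cudaGraphDebugDotFlags values above form the flags argument of cudaGraphDebugDotPrint(), which is useful when instantiation or an exec update fails. A one-function sketch; the output path is arbitrary:

#include <cuda_runtime.h>

static void dump_graph(cudaGraph_t graph)
{
    /* Write a Graphviz .dot description of the graph, including kernel-node
     * parameters and attributes, to the given path. */
    cudaGraphDebugDotPrint(graph, "/tmp/graph.dot",
                           cudaGraphDebugDotFlagsKernelNodeParams |
                           cudaGraphDebugDotFlagsKernelNodeAttributes);
}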
*/ +}; + +typedef __device_builtin__ enum cudaLaunchMemSyncDomain { + cudaLaunchMemSyncDomainDefault = 0, + cudaLaunchMemSyncDomainRemote = 1 +} cudaLaunchMemSyncDomain; + +typedef __device_builtin__ struct cudaLaunchMemSyncDomainMap_st { + unsigned char default_; + unsigned char remote; +} cudaLaunchMemSyncDomainMap; + +/** + * Launch attributes enum; used as id field of ::cudaLaunchAttribute + */ +typedef __device_builtin__ enum cudaLaunchAttributeID { + cudaLaunchAttributeIgnore = 0 /**< Ignored entry, for convenient composition */ + , cudaLaunchAttributeAccessPolicyWindow = 1 /**< Valid for streams, graph nodes, launches. */ + , cudaLaunchAttributeCooperative = 2 /**< Valid for graph nodes, launches. */ + , cudaLaunchAttributeSynchronizationPolicy = 3 /**< Valid for streams. */ + , cudaLaunchAttributeClusterDimension = 4 /**< Valid for graph nodes, launches. */ + , cudaLaunchAttributeClusterSchedulingPolicyPreference = 5 /**< Valid for graph nodes, launches. */ + , cudaLaunchAttributeProgrammaticStreamSerialization = 6 /**< Valid for launches. Setting + programmaticStreamSerializationAllowed to non-0 + signals that the kernel will use programmatic + means to resolve its stream dependency, so that + the CUDA runtime should opportunistically allow + the grid's execution to overlap with the previous + kernel in the stream, if that kernel requests the + overlap. The dependent launches can choose to wait on + the dependency using the programmatic sync + (cudaGridDependencySynchronize() or equivalent PTX + instructions). */ + , cudaLaunchAttributeProgrammaticEvent = 7 /**< Valid for launches. Event recorded through this + launch attribute is guaranteed to only trigger after + all block in the associated kernel trigger the event. + A block can trigger the event programmatically in a + future CUDA release. A trigger can also be inserted at + the beginning of each block's execution if + triggerAtBlockStart is set to non-0. The dependent + launches can choose to wait on the dependency using + the programmatic sync (cudaGridDependencySynchronize() + or equivalent PTX instructions). Note that dependents + (including the CPU thread calling + cudaEventSynchronize()) are not guaranteed to observe + the release precisely when it is released. For + example, cudaEventSynchronize() may only observe the + event trigger long after the associated kernel has + completed. This recording type is primarily meant for + establishing programmatic dependency between device + tasks. The event supplied must not be an interprocess + or interop event. The event must disable timing (i.e. + created with ::cudaEventDisableTiming flag set). */ + , cudaLaunchAttributePriority = 8 /**< Valid for streams, graph nodes, launches. */ + , cudaLaunchAttributeMemSyncDomainMap = 9 + , cudaLaunchAttributeMemSyncDomain = 10 +} cudaLaunchAttributeID; + +/** + * Launch attributes union; used as value field of ::cudaLaunchAttribute + */ +typedef __device_builtin__ union cudaLaunchAttributeValue { + char pad[64]; /* Pad to 64 bytes */ + struct cudaAccessPolicyWindow accessPolicyWindow; /**< Attribute ::cudaAccessPolicyWindow. */ + int cooperative; /**< Nonzero indicates a cooperative kernel (see ::cudaLaunchCooperativeKernel). */ + enum cudaSynchronizationPolicy syncPolicy; /**< ::cudaSynchronizationPolicy for work queued up in this stream */ + struct { + unsigned int x; + unsigned int y; + unsigned int z; + } clusterDim; /**< Cluster dimensions for the kernel node. 
*/ + enum cudaClusterSchedulingPolicy clusterSchedulingPolicyPreference; /**< Cluster scheduling policy preference for the kernel node. */ + int programmaticStreamSerializationAllowed; + struct { + cudaEvent_t event; + int flags; + int triggerAtBlockStart; + } programmaticEvent; + int priority; /**< Execution priority of the kernel. */ + cudaLaunchMemSyncDomainMap memSyncDomainMap; + cudaLaunchMemSyncDomain memSyncDomain; +} cudaLaunchAttributeValue; + +/** + * Launch attribute + */ +typedef __device_builtin__ struct cudaLaunchAttribute_st { + cudaLaunchAttributeID id; + char pad[8 - sizeof(cudaLaunchAttributeID)]; + cudaLaunchAttributeValue val; +} cudaLaunchAttribute; + +/** + * CUDA extensible launch configuration + */ +typedef __device_builtin__ struct cudaLaunchConfig_st { + dim3 gridDim; /**< Grid dimensions */ + dim3 blockDim; /**< Block dimensions */ + size_t dynamicSmemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + cudaStream_t stream; /**< Stream identifier */ + cudaLaunchAttribute *attrs; /**< nullable if numAttrs == 0 */ + unsigned int numAttrs; /**< Number of attributes populated in attrs */ +} cudaLaunchConfig_t; + +#define cudaStreamAttrID cudaLaunchAttributeID +#define cudaStreamAttributeAccessPolicyWindow cudaLaunchAttributeAccessPolicyWindow +#define cudaStreamAttributeSynchronizationPolicy cudaLaunchAttributeSynchronizationPolicy +#define cudaStreamAttributeMemSyncDomainMap cudaLaunchAttributeMemSyncDomainMap +#define cudaStreamAttributeMemSyncDomain cudaLaunchAttributeMemSyncDomain +#define cudaStreamAttributePriority cudaLaunchAttributePriority + +#define cudaStreamAttrValue cudaLaunchAttributeValue + +#define cudaKernelNodeAttrID cudaLaunchAttributeID +#define cudaKernelNodeAttributeAccessPolicyWindow cudaLaunchAttributeAccessPolicyWindow +#define cudaKernelNodeAttributeCooperative cudaLaunchAttributeCooperative +#define cudaKernelNodeAttributePriority cudaLaunchAttributePriority +#define cudaKernelNodeAttributeClusterDimension cudaLaunchAttributeClusterDimension +#define cudaKernelNodeAttributeClusterSchedulingPolicyPreference cudaLaunchAttributeClusterSchedulingPolicyPreference +#define cudaKernelNodeAttributeMemSyncDomainMap cudaLaunchAttributeMemSyncDomainMap +#define cudaKernelNodeAttributeMemSyncDomain cudaLaunchAttributeMemSyncDomain + +#define cudaKernelNodeAttrValue cudaLaunchAttributeValue + +/** @} */ +/** @} */ /* END CUDART_TYPES */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DRIVER_TYPES_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DRIVER_TYPES_H__ +#endif + +#undef __CUDA_DEPRECATED + +#endif /* !__DRIVER_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h new file mode 100644 index 0000000000000000000000000000000000000000..785bec4e5c0652f9605ccf9341b7f761a85471ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
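::cudaLaunchConfig_t and the ::cudaLaunchAttribute list above feed the extensible launch API (cudaLaunchKernelEx). A sketch that launches a hypothetical kern kernel with a 2-block cluster, applicable only where ::cudaDeviceProp::clusterLaunch is nonzero; the kernel, grid/block sizes and cluster shape are illustrative, error checking omitted:

#include <cuda_runtime.h>

__global__ void kern(float *x) { x[blockIdx.x * blockDim.x + threadIdx.x] += 1.0f; }

static void launch_with_cluster(float *d_x, cudaStream_t stream)
{
    cudaLaunchAttribute attr = {};
    attr.id = cudaLaunchAttributeClusterDimension;
    attr.val.clusterDim.x = 2;   /* 2 thread blocks per cluster */
    attr.val.clusterDim.y = 1;
    attr.val.clusterDim.z = 1;

    cudaLaunchConfig_t cfg = {};
    cfg.gridDim          = dim3(32);   /* must be a multiple of the cluster size */
    cfg.blockDim         = dim3(128);
    cfg.dynamicSmemBytes = 0;
    cfg.stream           = stream;
    cfg.attrs            = &attr;
    cfg.numAttrs         = 1;

    cudaLaunchKernelEx(&cfg, kern, d_x);  /* templated overload from cuda_runtime.h */
}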
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__ +#endif + +#include "crt/host_config.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__ +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..98a9c98a957e8f60e872b94fde762516c5523367 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("host_defines.h is an internal header file and must not be used directly. 
This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__ +#endif + +#include "crt/host_defines.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__ +#endif diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4a7e42c6b89ba4b446d4cf3d52c8bacd74e73b0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h @@ -0,0 +1,103 @@ +/* + * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__LIBRARY_TYPES_H__) +#define __LIBRARY_TYPES_H__ + + + +typedef enum cudaDataType_t +{ + CUDA_R_16F = 2, /* real as a half */ + CUDA_C_16F = 6, /* complex as a pair of half numbers */ + CUDA_R_16BF = 14, /* real as a nv_bfloat16 */ + CUDA_C_16BF = 15, /* complex as a pair of nv_bfloat16 numbers */ + CUDA_R_32F = 0, /* real as a float */ + CUDA_C_32F = 4, /* complex as a pair of float numbers */ + CUDA_R_64F = 1, /* real as a double */ + CUDA_C_64F = 5, /* complex as a pair of double numbers */ + CUDA_R_4I = 16, /* real as a signed 4-bit int */ + CUDA_C_4I = 17, /* complex as a pair of signed 4-bit int numbers */ + CUDA_R_4U = 18, /* real as a unsigned 4-bit int */ + CUDA_C_4U = 19, /* complex as a pair of unsigned 4-bit int numbers */ + CUDA_R_8I = 3, /* real as a signed 8-bit int */ + CUDA_C_8I = 7, /* complex as a pair of signed 8-bit int numbers */ + CUDA_R_8U = 8, /* real as a unsigned 8-bit int */ + CUDA_C_8U = 9, /* complex as a pair of unsigned 8-bit int numbers */ + CUDA_R_16I = 20, /* real as a signed 16-bit int */ + CUDA_C_16I = 21, /* complex as a pair of signed 16-bit int numbers */ + CUDA_R_16U = 22, /* real as a unsigned 16-bit int */ + CUDA_C_16U = 23, /* complex as a pair of unsigned 16-bit int numbers */ + CUDA_R_32I = 10, /* real as a signed 32-bit int */ + CUDA_C_32I = 11, /* complex as a pair of signed 32-bit int numbers */ + CUDA_R_32U = 12, /* real as a unsigned 32-bit int */ + CUDA_C_32U = 13, /* complex as a pair of unsigned 32-bit int numbers */ + CUDA_R_64I = 24, /* real as a signed 64-bit int */ + CUDA_C_64I = 25, /* complex as a pair of signed 64-bit int numbers */ + CUDA_R_64U = 26, /* real as a unsigned 64-bit int */ + CUDA_C_64U = 27, /* complex as a pair of unsigned 64-bit int numbers */ + CUDA_R_8F_E4M3 = 28, /* real as a nv_fp8_e4m3 */ + CUDA_R_8F_E5M2 = 29, /* real as a nv_fp8_e5m2 */ +} cudaDataType; + + +typedef enum libraryPropertyType_t +{ + MAJOR_VERSION, + MINOR_VERSION, + PATCH_LEVEL +} libraryPropertyType; + + +#ifndef __cplusplus +typedef enum cudaDataType_t cudaDataType_t; +typedef enum libraryPropertyType_t libraryPropertyType_t; +#endif + +#endif /* !__LIBRARY_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..39937e980f88a614d847154f9e4364bd9ba95cbd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h @@ -0,0 +1,152 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
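The ::cudaDataType_t tags defined in library_types.h above are how libraries such as cuBLAS and cuSPARSE describe element types, and callers frequently need the matching element size. A small self-contained helper covering some of the tags (the remaining cases follow the same pattern); the function name is illustrative:

#include <library_types.h>
#include <stddef.h>

/* Size in bytes of one element of the given type; 0 for tags not handled here. */
static size_t cuda_dtype_size(cudaDataType t)
{
    switch (t) {
    case CUDA_R_8I:  case CUDA_R_8U:
    case CUDA_R_8F_E4M3: case CUDA_R_8F_E5M2:            return 1;
    case CUDA_R_16F: case CUDA_R_16BF:
    case CUDA_R_16I: case CUDA_R_16U:                    return 2;
    case CUDA_R_32F: case CUDA_R_32I: case CUDA_R_32U:   return 4;
    case CUDA_R_64F: case CUDA_R_64I: case CUDA_R_64U:   return 8;
    case CUDA_C_32F:                                     return 8;   /* pair of floats */
    case CUDA_C_64F:                                     return 16;  /* pair of doubles */
    default:                                             return 0;
    }
}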
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__MATH_CONSTANTS_H__) +#define __MATH_CONSTANTS_H__ + +/* single precision constants */ +#define CUDART_INF_F __int_as_float(0x7f800000U) +#define CUDART_NAN_F __int_as_float(0x7fffffffU) +#define CUDART_MIN_DENORM_F __int_as_float(0x00000001U) +#define CUDART_MAX_NORMAL_F __int_as_float(0x7f7fffffU) +#define CUDART_NEG_ZERO_F __int_as_float(0x80000000U) +#define CUDART_ZERO_F 0.0F +#define CUDART_ONE_F 1.0F +#define CUDART_SQRT_HALF_F 0.707106781F +#define CUDART_SQRT_HALF_HI_F 0.707106781F +#define CUDART_SQRT_HALF_LO_F 1.210161749e-08F +#define CUDART_SQRT_TWO_F 1.414213562F +#define CUDART_THIRD_F 0.333333333F +#define CUDART_PIO4_F 0.785398163F +#define CUDART_PIO2_F 1.570796327F +#define CUDART_3PIO4_F 2.356194490F +#define CUDART_2_OVER_PI_F 0.636619772F +#define CUDART_SQRT_2_OVER_PI_F 0.797884561F +#define CUDART_PI_F 3.141592654F +#define CUDART_L2E_F 1.442695041F +#define CUDART_L2T_F 3.321928094F +#define CUDART_LG2_F 0.301029996F +#define CUDART_LGE_F 0.434294482F +#define CUDART_LN2_F 0.693147181F +#define CUDART_LNT_F 2.302585093F +#define CUDART_LNPI_F 1.144729886F +#define CUDART_TWO_TO_M126_F 1.175494351e-38F +#define CUDART_TWO_TO_126_F 8.507059173e37F +#define CUDART_NORM_HUGE_F 3.402823466e38F +#define CUDART_TWO_TO_23_F 8388608.0F +#define CUDART_TWO_TO_24_F 16777216.0F +#define CUDART_TWO_TO_31_F 2147483648.0F +#define CUDART_TWO_TO_32_F 4294967296.0F +#define CUDART_REMQUO_BITS_F 3U +#define CUDART_REMQUO_MASK_F (~((~0U)<= 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on compute_70 and above, and should be replaced with "#x"_sync()."\ + "To continue using "#x"(), specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options: -arch=compute_60 -code=sm_70." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." +#else +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." +#endif + +extern "C" +{ +extern __device__ __device_builtin__ void __threadfence_system(void); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-to-nearest-even mode. + * + * Divides two floating-point values \p x by \p y in round-to-nearest-even mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-towards-zero mode. + * + * Divides two floating-point values \p x by \p y in round-towards-zero mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-up mode. + * + * Divides two floating-point values \p x by \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-down mode. 
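A short device-code sketch using one of the single-precision constants above together with the round-to-nearest division intrinsic __ddiv_rn() documented above; the kernel name and data layout are illustrative:

#include <cuda_runtime.h>
#include <math_constants.h>

/* Wrap each angle into [-pi, pi] and compute a reciprocal period in
 * round-to-nearest-even mode. */
__global__ void wrap_angles(float *theta, double *inv_period, double period, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        theta[i] = remainderf(theta[i], 2.0f * CUDART_PI_F);
        if (i == 0) *inv_period = __ddiv_rn(1.0, period);
    }
}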
+ * + * Divides two floating-point values \p x by \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the reciprocal of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-towards-zero mode. + * + * Compute the reciprocal of \p x in round-towards-zero mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-up mode. + * + * Compute the reciprocal of \p x in round-up (to positive infinity) mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-down mode. + * + * Compute the reciprocal of \p x in round-down (to negative infinity) mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the square root of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-towards-zero mode. + * + * Compute the square root of \p x in round-towards-zero mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. 
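+ *
+ * Illustrative sketch (the helper name is hypothetical; only the
+ * ::__dsqrt_rd / ::__dsqrt_ru intrinsics are real): the directed-rounding
+ * members of this family bracket the exact square root from below and above:
+ * \code
+ * __device__ void sqrt_bounds(double x, double *lo, double *hi)
+ * {
+ *     *lo = __dsqrt_rd(x);   // never above the exact square root
+ *     *hi = __dsqrt_ru(x);   // never below the exact square root
+ * }
+ * \endcode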
+ * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-up mode. + * + * Compute the square root of \p x in round-up (to positive infinity) mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-down mode. + * + * Compute the square root of \p x in round-down (to negative infinity) mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_rd(double x); +extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__ballot)) unsigned int __ballot(int); +extern __device__ __device_builtin__ int __syncthreads_count(int); +extern __device__ __device_builtin__ int __syncthreads_and(int); +extern __device__ __device_builtin__ int __syncthreads_or(int); +extern __device__ __device_builtin__ long long int clock64(void); + + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-to-nearest-even mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_rn(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_rn(float x, float y, float z); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-down mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_rd(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_rd(float x, float y, float z); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-up mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_ru(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_ru(float x, float y, float z); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-towards-zero mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_rz(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_rz(float x, float y, float z); + + +// SM_13 intrinsics + +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a double as a 64-bit signed integer. + * + * Reinterpret the bits in the double-precision floating-point value \p x + * as a signed 64-bit integer. + * \return Returns reinterpreted value. 
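+ *
+ * Illustrative sketch (the helper is hypothetical): the copy is a pure bit
+ * reinterpretation, so it round-trips through ::__longlong_as_double and the
+ * integer form exposes the IEEE-754 fields directly:
+ * \code
+ * __device__ int biased_exponent(double x)
+ * {
+ *     long long bits = __double_as_longlong(x);   // raw IEEE-754 encoding
+ *     return (int)((bits >> 52) & 0x7ff);         // 11-bit biased exponent
+ * }
+ * \endcode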
+ */ +extern __device__ __device_builtin__ long long int __double_as_longlong(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a 64-bit signed integer as a double. + * + * Reinterpret the bits in the 64-bit signed integer value \p x as + * a double-precision floating-point value. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ double __longlong_as_double(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-to-nearest-even mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-to-nearest-even mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rn(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-towards-zero mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-towards-zero mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. 
+ * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rz(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-up mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-up (to positive infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_ru(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-down mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-down (to negative infinity) mode. 
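+ *
+ * Illustrative aside (hypothetical helper): because the whole __fma_* family
+ * rounds only once, the round-to-nearest variant can recover the rounding
+ * error of a product exactly:
+ * \code
+ * __device__ void two_prod(double a, double b, double *p, double *err)
+ * {
+ *     *p   = a * b;                 // rounded product
+ *     *err = __fma_rn(a, b, -*p);   // exact residual, a*b == *p + *err
+ * }
+ * \endcode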
+ * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rd(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-to-nearest-even mode. + * + * Adds two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-towards-zero mode. + * + * Adds two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-up mode. + * + * Adds two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-down mode. + * + * Adds two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-to-nearest-even mode. + * + * Subtracts two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-towards-zero mode. + * + * Subtracts two floating-point values \p x and \p y in round-towards-zero mode. 
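+ *
+ * Illustrative sketch (hypothetical helper): the round-down/round-up members
+ * of the subtract family give an outward enclosure of the exact difference:
+ * \code
+ * __device__ void diff_bounds(double x, double y, double *lo, double *hi)
+ * {
+ *     *lo = __dsub_rd(x, y);   // <= exact x - y
+ *     *hi = __dsub_ru(x, y);   // >= exact x - y
+ * }
+ * \endcode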
+ * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-up mode. + * + * Subtracts two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-down mode. + * + * Subtracts two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-to-nearest-even mode. + * + * Multiplies two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-towards-zero mode. + * + * Multiplies two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-up mode. + * + * Multiplies two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-down mode. + * + * Multiplies two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-up mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-up (to positive infinity) mode. + * \return Returns converted value. 
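+ *
+ * Illustrative sketch (hypothetical helper): pairing this conversion with
+ * ::__double2float_rd yields a float interval that still encloses the
+ * original double value after narrowing:
+ * \code
+ * __device__ void narrow_to_float(double x, float *lo, float *hi)
+ * {
+ *     *lo = __double2float_rd(x);   // <= x
+ *     *hi = __double2float_ru(x);   // >= x
+ * }
+ * \endcode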
+ */ +extern __device__ __device_builtin__ float __double2float_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-down mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-down mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-down mode. 
+ * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed int to a double. + * + * Convert the signed integer value \p x to a double-precision floating-point value. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __int2double_rn(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned int to a double. + * + * Convert the unsigned integer value \p x to a double-precision floating-point value. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __uint2double_rn(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rn(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-towards-zero mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rz(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-up mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_ru(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-down mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. 
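+ *
+ * Concrete illustration (values added here for clarity): 64-bit integers
+ * above 2^53 are not always exactly representable, so the rounding mode is
+ * observable:
+ * \code
+ * long long v = 9007199254740993LL;   // 2^53 + 1
+ * double lo = __ll2double_rd(v);      // 9007199254740992.0
+ * double hi = __ll2double_ru(v);      // 9007199254740994.0
+ * \endcode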
+ */ +extern __device__ __device_builtin__ double __ll2double_rd(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rn(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-towards-zero mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rz(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-up mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_ru(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-down mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rd(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high 32 bits in a double as a signed integer. + * + * Reinterpret the high 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ int __double2hiint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret low 32 bits in a double as a signed integer. + * + * Reinterpret the low 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ int __double2loint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high and low 32-bit integer values as a double. + * + * Reinterpret the integer value of \p hi as the high 32 bits of a + * double-precision floating-point value and the integer value of \p lo + * as the low 32 bits of the same double-precision floating-point value. + * \return Returns reinterpreted value. 
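+ *
+ * Illustrative sketch (hypothetical helper): this is the inverse of the
+ * ::__double2hiint / ::__double2loint pair, so splitting and reassembling a
+ * value is a bitwise identity:
+ * \code
+ * __device__ double split_and_rebuild(double x)
+ * {
+ *     int hi = __double2hiint(x);        // upper 32 bits
+ *     int lo = __double2loint(x);        // lower 32 bits
+ *     return __hiloint2double(hi, lo);   // bitwise identical to x
+ * }
+ * \endcode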
+ */ +extern __device__ __device_builtin__ double __hiloint2double(int hi, int lo); + + +} + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ +#endif /* _NVHPC_CUDA */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ +__SM_20_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__ballot)) unsigned int ballot(bool pred) __DEF_IF_HOST + +__SM_20_INTRINSICS_DECL__ int syncthreads_count(bool pred) __DEF_IF_HOST + +__SM_20_INTRINSICS_DECL__ bool syncthreads_and(bool pred) __DEF_IF_HOST + +__SM_20_INTRINSICS_DECL__ bool syncthreads_or(bool pred) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +__SM_20_INTRINSICS_DECL__ unsigned int __isGlobal(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ unsigned int __isShared(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ unsigned int __isConstant(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ unsigned int __isLocal(const void *ptr) __DEF_IF_HOST +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ unsigned int __isGridConstant(const void *ptr) __DEF_IF_HOST +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_global(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_shared(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_constant(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_local(const void *ptr) __DEF_IF_HOST +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_grid_constant(const void *ptr) __DEF_IF_HOST +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + +__SM_20_INTRINSICS_DECL__ void * __cvta_global_to_generic(size_t rawbits) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ void * __cvta_shared_to_generic(size_t rawbits) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ void * __cvta_constant_to_generic(size_t rawbits) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ void * __cvta_local_to_generic(size_t rawbits) __DEF_IF_HOST +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ void * __cvta_grid_constant_to_generic(size_t rawbits) __DEF_IF_HOST +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_20_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_20_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ */ +#endif /* !__SM_20_INTRINSICS_H__ && defined(__CUDA_ARCH__) */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..30c1ab99e0d66ebbceb8fe88b1122443cbf5f998 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp @@ -0,0 +1,221 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_20_INTRINSICS_HPP__) +#define __SM_20_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_INTRINSICS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_INTRINSICS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_20_INTRINSICS_DECL__ unsigned int ballot(bool pred) +{ + return __ballot((int)pred); +} + +__SM_20_INTRINSICS_DECL__ int syncthreads_count(bool pred) +{ + return __syncthreads_count((int)pred); +} + +__SM_20_INTRINSICS_DECL__ bool syncthreads_and(bool pred) +{ + return (bool)__syncthreads_and((int)pred); +} + +__SM_20_INTRINSICS_DECL__ bool syncthreads_or(bool pred) +{ + return (bool)__syncthreads_or((int)pred); +} + + +extern "C" { + __device__ unsigned __nv_isGlobal_impl(const void *); + __device__ unsigned __nv_isShared_impl(const void *); + __device__ unsigned __nv_isConstant_impl(const void *); + __device__ unsigned __nv_isLocal_impl(const void *); + __device__ unsigned __nv_isGridConstant_impl(const void *); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isGlobal(const void *ptr) +{ + return __nv_isGlobal_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isShared(const void *ptr) +{ + return __nv_isShared_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isConstant(const void *ptr) +{ + return __nv_isConstant_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isLocal(const void *ptr) +{ + return __nv_isLocal_impl(ptr); +} + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ unsigned int __isGridConstant(const void *ptr) +{ + return __nv_isGridConstant_impl(ptr); +} +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + +extern "C" { + __device__ size_t __nv_cvta_generic_to_global_impl(const void *); + __device__ size_t __nv_cvta_generic_to_shared_impl(const void *); + __device__ size_t __nv_cvta_generic_to_constant_impl(const void *); + __device__ size_t __nv_cvta_generic_to_local_impl(const void *); + __device__ void * __nv_cvta_global_to_generic_impl(size_t); + __device__ void * __nv_cvta_shared_to_generic_impl(size_t); + __device__ void * __nv_cvta_constant_to_generic_impl(size_t); + __device__ void * __nv_cvta_local_to_generic_impl(size_t); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_global(const void *p) +{ + return __nv_cvta_generic_to_global_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_shared(const void *p) +{ + return __nv_cvta_generic_to_shared_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_constant(const void *p) +{ + return __nv_cvta_generic_to_constant_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_local(const void *p) +{ + return __nv_cvta_generic_to_local_impl(p); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_global_to_generic(size_t rawbits) +{ + return __nv_cvta_global_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_shared_to_generic(size_t rawbits) +{ + return __nv_cvta_shared_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_constant_to_generic(size_t rawbits) +{ + 
return __nv_cvta_constant_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_local_to_generic(size_t rawbits) +{ + return __nv_cvta_local_to_generic_impl(rawbits); +} + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __CVTA_PTR_64 1 +#endif + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_grid_constant(const void *ptr) +{ +#if __CVTA_PTR_64 + unsigned long long ret; + asm("cvta.to.param.u64 %0, %1;" : "=l"(ret) : "l"(ptr)); +#else /* !__CVTA_PTR_64 */ + unsigned ret; + asm("cvta.to.param.u32 %0, %1;" : "=r"(ret) : "r"(ptr)); +#endif /* __CVTA_PTR_64 */ + return (size_t)ret; + +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_grid_constant_to_generic(size_t rawbits) +{ + void *ret; +#if __CVTA_PTR_64 + unsigned long long in = rawbits; + asm("cvta.param.u64 %0, %1;" : "=l"(ret) : "l"(in)); +#else /* !__CVTA_PTR_64 */ + unsigned in = rawbits; + asm("cvta.param.u32 %0, %1;" : "=r"(ret) : "r"(in)); +#endif /* __CVTA_PTR_64 */ + return ret; +} +#undef __CVTA_PTR_64 +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_20_INTRINSICS_DECL__ + +#endif /* !__SM_20_INTRINSICS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..c2efc2e248350f5800f56e44348ebbc9b04b480c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h @@ -0,0 +1,221 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_30_INTRINSICS_H__) +#define __SM_30_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_30_INTRINSICS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_30_INTRINSICS_DECL__ extern __device__ __cudart_builtin__ +#else /* !__CUDACC_RTC__ */ +#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +/******************************************************************************* +* * +* Below are declarations of SM-3.0 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." 
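+/* Migration sketch (illustrative comment only; the mask shown assumes a fully
+ * converged warp): the deprecated warp builtins operate on an implicit
+ * whole-warp scope, while the *_sync replacements declared below take an
+ * explicit participation mask, e.g.
+ *
+ *   unsigned mask = 0xffffffffu;                      // or __activemask() when safe
+ *   int      v    = __shfl_sync(mask, x, srcLane);    // replaces __shfl(x, srcLane)
+ *   unsigned b    = __ballot_sync(mask, pred);        // replaces __ballot(pred)
+ */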
+#endif + +__SM_30_INTRINSICS_DECL__ unsigned __fns(unsigned mask, unsigned base, int offset) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __barrier_sync(unsigned id) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __barrier_sync_count(unsigned id, unsigned cnt) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __syncwarp(unsigned mask=0xFFFFFFFF) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __all_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __any_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __uni_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned __ballot_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned __activemask() __DEF_IF_HOST + +// Warp register exchange (shuffle) intrinsics. +// Notes: +// a) Warp size is hardcoded to 32 here, because the compiler does not know +// the "warpSize" constant at this time +// b) we cannot map the float __shfl to the int __shfl because it'll mess with +// the register number (especially if you're doing two shfls to move a double). +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) int __shfl(int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned int __shfl(unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) int __shfl_up(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned int __shfl_up(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) int __shfl_down(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned int __shfl_down(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) int __shfl_xor(int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned int __shfl_xor(unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) float __shfl(float var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) float __shfl_up(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) float __shfl_down(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) float __shfl_xor(float var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned 
mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width=warpSize) __DEF_IF_HOST + +// 64-bits SHFL +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long long __shfl(unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long long __shfl(long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long long __shfl_up(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long long __shfl_down(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long long __shfl_xor(long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) double __shfl(double var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) double __shfl_up(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) double __shfl_down(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) double __shfl_xor(double var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long 
__shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width=warpSize) __DEF_IF_HOST + +// long needs some help to choose between 32-bits and 64-bits +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long __shfl(long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long __shfl(unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long __shfl_up(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long __shfl_up(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long __shfl_down(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long __shfl_down(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long __shfl_xor(long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long __shfl_xor(unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned 
int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_30_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_30_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_30_INTRINSICS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a5bcac5ee68c0cf547e4de7c08badf37106639dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp @@ -0,0 +1,604 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_30_INTRINSICS_HPP__) +#define __SM_30_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_30_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +// In here are intrinsics which are built in to the compiler. These may be +// referenced by intrinsic implementations from this file. +extern "C" +{ +} + +/******************************************************************************* +* * +* Below are implementations of SM-3.0 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +__SM_30_INTRINSICS_DECL__ +unsigned __fns(unsigned mask, unsigned base, int offset) { + extern __device__ __device_builtin__ unsigned int __nvvm_fns(unsigned int mask, unsigned int base, int offset); + return __nvvm_fns(mask, base, offset); +} + +__SM_30_INTRINSICS_DECL__ +void __barrier_sync(unsigned id) { + extern __device__ __device_builtin__ void __nvvm_barrier_sync(unsigned id); + return __nvvm_barrier_sync(id); +} + +__SM_30_INTRINSICS_DECL__ +void __barrier_sync_count(unsigned id, unsigned cnt) { + extern __device__ __device_builtin__ void __nvvm_barrier_sync_cnt(unsigned id, unsigned cnt); + return __nvvm_barrier_sync_cnt(id, cnt); +} + +__SM_30_INTRINSICS_DECL__ +void __syncwarp(unsigned mask) { + extern __device__ __device_builtin__ void __nvvm_bar_warp_sync(unsigned mask); + return __nvvm_bar_warp_sync(mask); +} + +__SM_30_INTRINSICS_DECL__ +int __all_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_all_sync(unsigned int mask, int pred); + return __nvvm_vote_all_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +int __any_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_any_sync(unsigned int mask, int pred); + return __nvvm_vote_any_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +int __uni_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_uni_sync(unsigned int mask, int pred); + return __nvvm_vote_uni_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +unsigned __ballot_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ unsigned int __nvvm_vote_ballot_sync(unsigned int mask, int pred); + return __nvvm_vote_ballot_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +unsigned __activemask() { + unsigned ret; + asm volatile ("activemask.b32 %0;" : "=r"(ret)); + return ret; +} + +// These are removed starting with compute_70 and onwards +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 + +__SM_30_INTRINSICS_DECL__ int __shfl(int var, int srcLane, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile 
("shfl.idx.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(srcLane), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl(unsigned int var, int srcLane, int width) { + return (unsigned int) __shfl((int)var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_up(int var, unsigned int delta, int width) { + int ret; + int c = (warpSize-width) << 8; + asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up(unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_up((int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_down(int var, unsigned int delta, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down(unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_down((int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_xor(int var, int laneMask, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(laneMask), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor(unsigned int var, int laneMask, int width) { + return (unsigned int) __shfl_xor((int)var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ float __shfl(float var, int srcLane, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(srcLane), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_up(float var, unsigned int delta, int width) { + float ret; + int c; + c = (warpSize-width) << 8; + asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_down(float var, unsigned int delta, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_xor(float var, int laneMask, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(laneMask), "r"(c)); + return ret; +} + +// 64-bits SHFL + +__SM_30_INTRINSICS_DECL__ long long __shfl(long long var, int srcLane, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl(hi, srcLane, width); + lo = __shfl(lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl(unsigned long long var, int srcLane, int width) { + return (unsigned long long) __shfl((long long) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_up(long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_up(hi, delta, width); + lo = __shfl_up(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_up((long long) var, delta, 
width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_down(long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_down(hi, delta, width); + lo = __shfl_down(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_down((long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_xor(long long var, int laneMask, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_xor(hi, laneMask, width); + lo = __shfl_xor(lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width) { + return (unsigned long long) __shfl_xor((long long) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ double __shfl(double var, int srcLane, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl(hi, srcLane, width); + lo = __shfl(lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_up(double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_up(hi, delta, width); + lo = __shfl_up(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_down(double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_down(hi, delta, width); + lo = __shfl_down(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_xor(double var, int laneMask, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_xor(hi, laneMask, width); + lo = __shfl_xor(lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ long __shfl(long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl((long long) var, srcLane, width) : + __shfl((int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl(unsigned long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl((unsigned long long) var, srcLane, width) : + __shfl((unsigned int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_up(long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up((long long) var, delta, width) : + __shfl_up((int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up(unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up((unsigned long long) var, delta, width) : + __shfl_up((unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_down(long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? 
+ __shfl_down((long long) var, delta, width) : + __shfl_down((int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down(unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_down((unsigned long long) var, delta, width) : + __shfl_down((unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_xor(long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor((long long) var, laneMask, width) : + __shfl_xor((int) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor(unsigned long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor((unsigned long long) var, laneMask, width) : + __shfl_xor((unsigned int) var, laneMask, width); +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */ + +// Warp register exchange (shuffle) intrinsics. +// Notes: +// a) Warp size is hardcoded to 32 here, because the compiler does not know +// the "warpSize" constant at this time +// b) we cannot map the float __shfl to the int __shfl because it'll mess with +// the register number (especially if you're doing two shfls to move a double). +__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_idx_sync(mask, var, srcLane, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width) { + return (unsigned int) __shfl_sync(mask, (int)var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = (warpSize-width) << 8; + ret = __nvvm_shfl_up_sync(mask, var, delta, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_up_sync(mask, (int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_down_sync(mask, var, delta, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_down_sync(mask, (int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_bfly_sync(mask, var, laneMask, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width) { + return (unsigned int) __shfl_xor_sync(mask, (int)var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width) { + extern __device__ __device_builtin__ 
unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_idx_sync(mask, __float_as_int(var), srcLane, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = (warpSize-width) << 8; + ret = __nvvm_shfl_up_sync(mask, __float_as_int(var), delta, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_down_sync(mask, __float_as_int(var), delta, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_bfly_sync(mask, __float_as_int(var), laneMask, c); + return __int_as_float(ret); +} + +// 64-bits SHFL +__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_sync(mask, hi, srcLane, width); + lo = __shfl_sync(mask, lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width) { + return (unsigned long long) __shfl_sync(mask, (long long) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_up_sync(mask, hi, delta, width); + lo = __shfl_up_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_up_sync(mask, (long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_down_sync(mask, hi, delta, width); + lo = __shfl_down_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_down_sync(mask, (long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_xor_sync(mask, hi, laneMask, width); + lo = __shfl_xor_sync(mask, lo, laneMask, width); + asm volatile("mov.b64 %0, 
{%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width) { + return (unsigned long long) __shfl_xor_sync(mask, (long long) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_sync(mask, hi, srcLane, width); + lo = __shfl_sync(mask, lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_up_sync(mask, hi, delta, width); + lo = __shfl_up_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_down_sync(mask, hi, delta, width); + lo = __shfl_down_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_xor_sync(mask, hi, laneMask, width); + lo = __shfl_xor_sync(mask, lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +// long needs some help to choose between 32-bits and 64-bits + +__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_sync(mask, (long long) var, srcLane, width) : + __shfl_sync(mask, (int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_sync(mask, (unsigned long long) var, srcLane, width) : + __shfl_sync(mask, (unsigned int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up_sync(mask, (long long) var, delta, width) : + __shfl_up_sync(mask, (int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up_sync(mask, (unsigned long long) var, delta, width) : + __shfl_up_sync(mask, (unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_down_sync(mask, (long long) var, delta, width) : + __shfl_down_sync(mask, (int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? 
+ __shfl_down_sync(mask, (unsigned long long) var, delta, width) : + __shfl_down_sync(mask, (unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor_sync(mask, (long long) var, laneMask, width) : + __shfl_xor_sync(mask, (int) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor_sync(mask, (unsigned long long) var, laneMask, width) : + __shfl_xor_sync(mask, (unsigned int) var, laneMask, width); +} + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_30_INTRINSICS_DECL__ + +#endif /* !__SM_30_INTRINSICS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..3865f1f56326c8a1dcfad672a92508684e2fbc32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h @@ -0,0 +1,141 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 
227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_32_ATOMIC_FUNCTIONS_H__) +#define __SM_32_ATOMIC_FUNCTIONS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ __location__(device) __location__(host) +#endif /* _NVHPC_CUDA */ + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +#ifdef __CUDA_ARCH__ +extern "C" +{ +extern __device__ __device_builtin__ long long __illAtomicMin(long long *address, long long val); +extern __device__ __device_builtin__ long long __illAtomicMax(long long *address, long long val); +extern __device__ __device_builtin__ long long __llAtomicAnd(long long *address, long long val); +extern __device__ __device_builtin__ long long __llAtomicOr(long long *address, long long val); +extern __device__ __device_builtin__ long long __llAtomicXor(long long *address, long long val); +extern __device__ __device_builtin__ unsigned long long __ullAtomicMin(unsigned long long *address, unsigned long long val); +extern __device__ __device_builtin__ unsigned long long __ullAtomicMax(unsigned long long *address, unsigned long long val); +extern __device__ __device_builtin__ unsigned long long __ullAtomicAnd(unsigned long long *address, unsigned long long val); +extern __device__ __device_builtin__ unsigned long long __ullAtomicOr (unsigned long long *address, unsigned long long val); +extern __device__ __device_builtin__ unsigned long long __ullAtomicXor(unsigned long long *address, unsigned long long val); +} +#endif /* __CUDA_ARCH__ */ + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ +#endif /* _NVHPC_CUDA */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long 
long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_32_ATOMIC_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_32_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_32_ATOMIC_FUNCTIONS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b8ff847157cc366361319b7932a8ffe0f1fd30f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp @@ -0,0 +1,134 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_32_ATOMIC_FUNCTIONS_HPP__) +#define __SM_32_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val) +{ + return __illAtomicMin(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val) +{ + return __illAtomicMax(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val) +{ + return __llAtomicAnd(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val) +{ + return __llAtomicOr(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val) +{ + return __llAtomicXor(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor(address, val); +} + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_32_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_32_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d50f9cea5c4d89bc555855a8ca73d617bcfa461a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp @@ -0,0 +1,588 @@ +/* + * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_32_INTRINSICS_HPP__) +#define __SM_32_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_32_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_32_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +// In here are intrinsics which are built in to the compiler. These may be +// referenced by intrinsic implementations from this file. +extern "C" +{ + // There are no intrinsics built in to the compiler for SM-3.5, + // all intrinsics are now implemented as inline PTX below. 
+} + +/******************************************************************************* +* * +* Below are implementations of SM-3.5 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +// LDG is a "load from global via texture path" command which can exhibit higher +// bandwidth on GK110 than a regular LD. +// Define a different pointer storage size for 64 and 32 bit +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif + +/****************************************************************************** + * __ldg * + ******************************************************************************/ + +// Size of long is architecture and OS specific. +#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.nc.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.nc.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.nc.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.nc.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) { int2 ret; 
asm volatile ("ld.global.nc.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.nc.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.nc.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.nc.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.nc.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.nc.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.nc.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.nc.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) { float ret; asm volatile ("ld.global.nc.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) { double ret; asm volatile ("ld.global.nc.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.nc.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.nc.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), 
"=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.nc.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + + +/****************************************************************************** + * __ldcg * + ******************************************************************************/ + +// Size of long is architecture and OS specific. +#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cg.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cg.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cg.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cg.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cg.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cg.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 
*ptr) { longlong2 ret; asm volatile ("ld.global.cg.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cg.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cg.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cg.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cg.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cg.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cg.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) { float ret; asm volatile ("ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) { double ret; asm volatile ("ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cg.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cg.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cg.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + +/****************************************************************************** + * __ldca * + 
******************************************************************************/ + +// Size of long is architecture and OS specific. +#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.ca.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.ca.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) { short2 ret; asm volatile ("ld.global.ca.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) { short4 ret; asm volatile ("ld.global.ca.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) { int2 ret; asm volatile ("ld.global.ca.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) { int4 ret; asm volatile ("ld.global.ca.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.ca.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } 
+__SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.ca.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.ca.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.ca.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.ca.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.ca.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.ca.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.ca.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) { float ret; asm volatile ("ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) { double ret; asm volatile ("ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) { float2 ret; asm volatile ("ld.global.ca.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) { float4 ret; asm volatile ("ld.global.ca.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) { double2 ret; asm volatile ("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + +/****************************************************************************** + * __ldcs * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cs.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cs.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cs.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cs.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cs.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cs.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cs.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.u16 %0, [%1];" : 
"=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cs.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cs.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cs.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cs.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cs.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cs.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cs.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) { float ret; asm volatile ("ld.global.cs.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) { double ret; asm volatile ("ld.global.cs.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cs.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cs.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + +/****************************************************************************** + * __ldlu * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) { unsigned short ret; asm ("ld.global.lu.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) { unsigned int ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) { unsigned long long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.lu.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.lu.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) { short2 ret; asm ("ld.global.lu.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) { short4 ret; asm ("ld.global.lu.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) { int2 ret; asm ("ld.global.lu.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) { int4 ret; asm ("ld.global.lu.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.lu.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.lu.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) { unsigned short ret; asm 
("ld.global.lu.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.lu.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.lu.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.lu.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.lu.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) { uint2 ret; asm ("ld.global.lu.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) { uint4 ret; asm ("ld.global.lu.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.lu.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) { float ret; asm ("ld.global.lu.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) { double ret; asm ("ld.global.lu.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) { float2 ret; asm ("ld.global.lu.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) { float4 ret; asm ("ld.global.lu.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) { double2 ret; asm ("ld.global.lu.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +/****************************************************************************** + * __ldcv * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) { unsigned short ret; asm ("ld.global.cv.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) { unsigned int ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) { unsigned long long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.cv.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.cv.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) { short2 ret; asm ("ld.global.cv.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) { short4 ret; asm ("ld.global.cv.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) { int2 ret; asm ("ld.global.cv.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) { int4 ret; asm ("ld.global.cv.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.cv.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.cv.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) { unsigned short ret; asm 
("ld.global.cv.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.cv.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.cv.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.cv.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.cv.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) { uint2 ret; asm ("ld.global.cv.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) { uint4 ret; asm ("ld.global.cv.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.cv.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) { float ret; asm ("ld.global.cv.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) { double ret; asm ("ld.global.cv.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) { float2 ret; asm ("ld.global.cv.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) { float4 ret; asm ("ld.global.cv.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) { double2 ret; asm ("ld.global.cv.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +/****************************************************************************** + * __stwb * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) { asm ("st.global.wb.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) { asm ("st.global.wb.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) { asm ("st.global.wb.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) { asm ("st.global.wb.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) { asm ("st.global.wb.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) { asm ("st.global.wb.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) { asm ("st.global.wb.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) { asm ("st.global.wb.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.wb.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) { asm ("st.global.wb.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) { asm ("st.global.wb.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) { asm ("st.global.wb.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) { asm ("st.global.wb.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wb.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) { asm ("st.global.wb.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) { asm ("st.global.wb.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) { asm ("st.global.wb.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) { asm ("st.global.wb.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) { asm ("st.global.wb.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +/****************************************************************************** + * __stcg * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) { asm ("st.global.cg.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) { asm ("st.global.cg.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) { asm ("st.global.cg.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) { asm ("st.global.cg.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) { asm ("st.global.cg.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) { asm ("st.global.cg.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) { asm ("st.global.cg.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) { asm ("st.global.cg.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.cg.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) { asm ("st.global.cg.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) { asm ("st.global.cg.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) { asm ("st.global.cg.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) { asm ("st.global.cg.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cg.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) { asm ("st.global.cg.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) { asm ("st.global.cg.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) { asm ("st.global.cg.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) { asm ("st.global.cg.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) { asm ("st.global.cg.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +/****************************************************************************** + * __stcs * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) { asm ("st.global.cs.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) { asm ("st.global.cs.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) { asm ("st.global.cs.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) { asm ("st.global.cs.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) { asm ("st.global.cs.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) { asm ("st.global.cs.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) { asm ("st.global.cs.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) { asm ("st.global.cs.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.cs.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) { asm ("st.global.cs.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) { asm ("st.global.cs.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) { asm ("st.global.cs.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) { asm ("st.global.cs.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cs.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) { asm ("st.global.cs.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) { asm ("st.global.cs.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) { asm ("st.global.cs.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) { asm ("st.global.cs.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) { asm ("st.global.cs.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +/****************************************************************************** + * __stwt * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) { asm ("st.global.wt.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) { asm ("st.global.wt.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) { asm ("st.global.wt.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) { asm ("st.global.wt.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) { asm ("st.global.wt.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) { asm ("st.global.wt.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) { asm ("st.global.wt.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) { asm ("st.global.wt.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.wt.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) { asm ("st.global.wt.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) { asm ("st.global.wt.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) { asm ("st.global.wt.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) { asm ("st.global.wt.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wt.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) { asm ("st.global.wt.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) { asm ("st.global.wt.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) { asm ("st.global.wt.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) { asm ("st.global.wt.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) { asm ("st.global.wt.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +#undef __LDG_PTR + + +// SHF is the "funnel shift" operation - an accelerated left/right shift with carry +// operating on 64-bit quantities, which are concatenations of two 32-bit registers. + +// This shifts [b:a] left by "shift" bits, returning the most significant bits of the result. +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift) +{ + unsigned int ret; + asm volatile ("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift)); + return ret; +} +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift) +{ + unsigned int ret; + asm volatile ("shf.l.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift)); + return ret; +} + +// This shifts [b:a] right by "shift" bits, returning the least significant bits of the result. 
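Before the right-shift variants that follow, a brief usage illustration may help; this is an editor's sketch, not part of the NVIDIA header. It assumes device code compiled with nvcc for sm_32 or newer, and the helper name rotl64_funnel is hypothetical. It composes a 64-bit rotate-left from __funnelshift_l, which treats its second argument as the upper 32 bits of the concatenated value and returns the upper 32 bits of the shifted result.

// Editor's illustration (not part of the original header): a 64-bit rotate-left
// built from __funnelshift_l. For shift amounts below 32 the new high word is the
// top of (hi:lo) << n and the new low word is the top of (lo:hi) << n; for 32..63
// the roles of the halves swap. The hardware applies the shift amount modulo 32.
__device__ unsigned long long rotl64_funnel(unsigned long long x, unsigned int n)
{
    unsigned int lo = (unsigned int)x;
    unsigned int hi = (unsigned int)(x >> 32);
    unsigned int new_hi, new_lo;
    if ((n & 63) < 32) {
        new_hi = __funnelshift_l(lo, hi, n);  // upper 32 bits of (hi:lo) << (n & 31)
        new_lo = __funnelshift_l(hi, lo, n);  // upper 32 bits of (lo:hi) << (n & 31)
    } else {
        new_hi = __funnelshift_l(hi, lo, n);
        new_lo = __funnelshift_l(lo, hi, n);
    }
    return ((unsigned long long)new_hi << 32) | new_lo;
}

The right-shift variants defined just below can be used in the same spirit, for example to read a 32-bit window that straddles two adjacent words.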
+__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift)
+{
+    unsigned int ret;
+    asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
+    return ret;
+}
+__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift)
+{
+    unsigned int ret;
+    asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
+    return ret;
+}
+
+
+#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
+
+#endif /* __cplusplus && __CUDACC__ */
+
+#undef __SM_32_INTRINSICS_DECL__
+
+#endif /* !__SM_32_INTRINSICS_HPP__ */
+
diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8961079aeac4c9e73a7c2825cf9ea10b171af09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_35_ATOMIC_FUNCTIONS_H__) +#define __SM_35_ATOMIC_FUNCTIONS_H__ + +/******************************************************************************* +* All sm_35 atomics are supported by sm_32 so simply include its header file * +*******************************************************************************/ +#include "sm_32_atomic_functions.h" + +#endif /* !__SM_35_ATOMIC_FUNCTIONS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_intrinsics.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..da1e823a24171ed1ca9414955c6c68159a4411f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_intrinsics.h @@ -0,0 +1,116 @@ +/* + + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + + * + + * NOTICE TO LICENSEE: + + * + + * This source code and/or documentation ("Licensed Deliverables") are + + * subject to NVIDIA intellectual property rights under U.S. and + + * international Copyright laws. + + * + + * These Licensed Deliverables contained herein is PROPRIETARY and + + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + + * conditions of a form of NVIDIA software license agreement by and + + * between NVIDIA and Licensee ("License Agreement") or electronically + + * accepted by Licensee. Notwithstanding any terms or conditions to + + * the contrary in the License Agreement, reproduction or disclosure + + * of the Licensed Deliverables to any third party without the express + + * written consent of NVIDIA is prohibited. + + * + + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + + * OF THESE LICENSED DELIVERABLES. + + * + + * U.S. Government End Users. These Licensed Deliverables are a + + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + + * 1995), consisting of "commercial computer software" and "commercial + + * computer software documentation" as such terms are used in 48 + + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + + * U.S. Government End Users acquire the Licensed Deliverables with + + * only those rights set forth herein. 
+ + * + + * Any use of the Licensed Deliverables in individual and commercial + + * software must include, in the user documentation and internal + + * comments to the code, the above Disclaimer and U.S. Government End + + * Users Notice. + + */ + + + +#if !defined(__SM_35_INTRINSICS_H__) + +#define __SM_35_INTRINSICS_H__ + + + +/********************************************************************************** + +* All sm_35 intrinsics are supported by sm_32 so simply include its header file * + +**********************************************************************************/ + +#include "sm_32_intrinsics.h" + + + +#endif /* !__SM_35_INTRINSICS_H__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..f2f6da7032f66f3988868ad769d7533d15def5a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h @@ -0,0 +1,543 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_60_ATOMIC_FUNCTIONS_H__) +#define __SM_60_ATOMIC_FUNCTIONS_H__ + + +#if defined(__CUDACC_RTC__) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__ +#else /* __CUDACC_RTC__ */ +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + + +#ifdef __CUDA_ARCH__ +extern "C" +{ +extern __device__ __device_builtin__ double __dAtomicAdd(double *address, double val); + +extern __device__ __device_builtin__ +int __iAtomicAdd_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicAdd_system(int *address, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAdd_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAdd_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAdd_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAdd_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +float __fAtomicAdd_block(float *address, float val); + +extern __device__ __device_builtin__ +float __fAtomicAdd_system(float *address, float val); + +extern __device__ __device_builtin__ +double __dAtomicAdd_block(double *address, double val); + +extern __device__ __device_builtin__ +double __dAtomicAdd_system(double *address, double val); + +extern __device__ __device_builtin__ +int __iAtomicExch_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicExch_system(int *address, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicExch_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicExch_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicExch_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicExch_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +float __fAtomicExch_block(float *address, float val); + +extern __device__ __device_builtin__ +float __fAtomicExch_system(float *address, float val); + +extern __device__ __device_builtin__ +int __iAtomicMin_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicMin_system(int *address, int val); + +extern __device__ 
__device_builtin__ +long long __illAtomicMin_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __illAtomicMin_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMin_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMin_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMin_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMin_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicMax_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicMax_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __illAtomicMax_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __illAtomicMax_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMax_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMax_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMax_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMax_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicInc_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicInc_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicDec_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicDec_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +int __iAtomicCAS_block(int *address, int compare, int val); + +extern __device__ __device_builtin__ +int __iAtomicCAS_system(int *address, int compare, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val); + +extern __device__ __device_builtin__ +int __iAtomicAnd_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicAnd_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicAnd_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicAnd_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAnd_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAnd_system(unsigned int *address, unsigned int val); + +extern __device__ 
__device_builtin__ +unsigned long long __ullAtomicAnd_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAnd_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicOr_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicOr_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicOr_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicOr_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicOr_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicOr_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicOr_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicOr_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicXor_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicXor_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicXor_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicXor_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicXor_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicXor_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicXor_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicXor_system(unsigned long long *address, unsigned long long val); +} +#endif /* __CUDA_ARCH__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_block(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_system(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_block(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_system(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_block(int *address, int val) __DEF_IF_HOST + 
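/*
 * Minimal usage sketch for the double-precision atomicAdd declared above.
 * This is an illustrative, hypothetical kernel (the names dot_accumulate,
 * x, y and result are not part of this header), and it assumes a device of
 * compute capability 6.0 or higher, where this overload is available.
 */
__global__ void dot_accumulate(const double *x, const double *y,
                               double *result, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        /* Accumulate one product per thread into a single global double.
         * On pre-sm_60 devices this overload does not exist and double
         * atomics are commonly emulated via atomicCAS on the bit pattern. */
        atomicAdd(result, x[i] * y[i]);
    }
}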
+__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_block(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_system(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_system(unsigned int *address, 
unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_block(int *address, int compare, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_system(int *address, int compare, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + 
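/*
 * Minimal usage sketch for the _block / _system scoped atomics declared
 * above. The kernel below is hypothetical (its names are illustrative only);
 * it assumes compute capability 6.0+ and, for the _system variant, memory
 * that both the GPU and the CPU can access concurrently (for example,
 * memory allocated with cudaMallocManaged).
 */
__global__ void scoped_counter_update(int *device_total, int *system_total)
{
    __shared__ int block_total;
    if (threadIdx.x == 0) block_total = 0;
    __syncthreads();

    /* Block scope: only threads of this block contend on the shared word,
     * which is typically cheaper than a device-wide atomic. */
    atomicAdd_block(&block_total, 1);
    __syncthreads();

    if (threadIdx.x == 0) {
        /* Default (device-wide) scope versus system scope; the _system
         * variant is additionally coherent with concurrent host accesses. */
        atomicAdd(device_total, block_total);
        atomicAdd_system(system_total, block_total);
    }
}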
+__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_60_ATOMIC_FUNCTIONS_DECL__ +#undef __DEF_IF_HOST + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_60_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_60_ATOMIC_FUNCTIONS_H__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..858b373238a4d87f8d5fc669bf4145f4f2a6e877 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp @@ -0,0 +1,527 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_60_ATOMIC_FUNCTIONS_HPP__) +#define __SM_60_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) +{ + return __dAtomicAdd(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_block(int *address, int val) +{ + return __iAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_system(int *address, int val) +{ + return __iAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_block(float *address, float val) +{ + return __fAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_system(float *address, float val) +{ + return __fAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_block(double *address, double val) +{ + return __dAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_system(double *address, double val) +{ + return __dAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_block(int *address, int val) +{ + return __iAtomicAdd_block(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_system(int *address, int val) +{ + return __iAtomicAdd_system(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_block(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_system(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_block(int *address, int val) +{ + return __iAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_system(int *address, int val) +{ + return __iAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_block(unsigned int *address, unsigned int val) +{ + return __uAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int 
atomicExch_system(unsigned int *address, unsigned int val) +{ + return __uAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_block(float *address, float val) +{ + return __fAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_system(float *address, float val) +{ + return __fAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_block(int *address, int val) +{ + return __iAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_system(int *address, int val) +{ + return __iAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_block(long long *address, long long val) +{ + return __illAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_system(long long *address, long long val) +{ + return __illAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_block(unsigned int *address, unsigned int val) +{ + return __uAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_system(unsigned int *address, unsigned int val) +{ + return __uAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_block(int *address, int val) +{ + return __iAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_system(int *address, int val) +{ + return __iAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_block(long long *address, long long val) +{ + return __illAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_system(long long *address, long long val) +{ + return __illAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_block(unsigned int *address, unsigned int val) +{ + return __uAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_system(unsigned int *address, unsigned int val) +{ + return __uAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_block(unsigned int *address, unsigned int val) +{ + return __uAtomicInc_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_system(unsigned int *address, unsigned int val) +{ + return __uAtomicInc_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int 
atomicDec_block(unsigned int *address, unsigned int val) +{ + return __uAtomicDec_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_system(unsigned int *address, unsigned int val) +{ + return __uAtomicDec_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_block(int *address, int compare, int val) +{ + return __iAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_system(int *address, int compare, int val) +{ + return __iAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val) +{ + return __uAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val) +{ + return __uAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) +{ + return __ullAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) +{ + return __ullAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_block(int *address, int val) +{ + return __iAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_system(int *address, int val) +{ + return __iAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_block(long long *address, long long val) +{ + return __llAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_system(long long *address, long long val) +{ + return __llAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_block(int *address, int val) +{ + return __iAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_system(int *address, int val) +{ + return __iAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_block(long long *address, long long val) +{ + return __llAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_system(long long *address, long long val) +{ + return __llAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_block(unsigned int *address, unsigned int val) +{ + return __uAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_system(unsigned int *address, unsigned int val) +{ + return __uAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long 
atomicOr_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_block(int *address, int val) +{ + return __iAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_system(int *address, int val) +{ + return __iAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_block(long long *address, long long val) +{ + return __llAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_system(long long *address, long long val) +{ + return __llAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_block(unsigned int *address, unsigned int val) +{ + return __uAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_system(unsigned int *address, unsigned int val) +{ + return __uAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor_system(address, val); +} + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_60_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_60_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..ff78f1a7e559190773d1be284727b335a2317e1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.h @@ -0,0 +1,123 @@ +/* + * Copyright 2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_61_INTRINSICS_H__) +#define __SM_61_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_61_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_61_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 610 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +/******************************************************************************* +* * +* Below are declarations of SM-6.1 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + + +/****************************************************************************** + * __dp2a * + ******************************************************************************/ +// Generic [_lo] +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(int srcA, int srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(unsigned int srcA, unsigned int srcB, unsigned int c) __DEF_IF_HOST +// Vector-style [_lo] +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(short2 srcA, char4 srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(ushort2 srcA, uchar4 srcB, unsigned int c) __DEF_IF_HOST +// Generic [_hi] +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(int srcA, int srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(unsigned int srcA, unsigned int srcB, unsigned int c) __DEF_IF_HOST +// Vector-style [_hi] +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(short2 srcA, char4 srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(ushort2 srcA, uchar4 srcB, unsigned int c) __DEF_IF_HOST + + +/****************************************************************************** + * __dp4a * + ******************************************************************************/ +// 
Generic +__SM_61_INTRINSICS_DECL__ int __dp4a(int srcA, int srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(unsigned int srcA, unsigned int srcB, unsigned int c) __DEF_IF_HOST +// Vector-style +__SM_61_INTRINSICS_DECL__ int __dp4a(char4 srcA, char4 srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(uchar4 srcA, uchar4 srcB, unsigned int c) __DEF_IF_HOST + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 610 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_61_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_61_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_61_INTRINSICS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5a561384b08a65445eed86bfc96a0694e5b9190c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp @@ -0,0 +1,161 @@ +/* + * Copyright 2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_61_INTRINSICS_HPP__) +#define __SM_61_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_61_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_61_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 610 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* Below are implementations of SM-6.1 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +// 4a +__SM_61_INTRINSICS_DECL__ int __dp4a(int srcA, int srcB, int c) { + int ret; + asm volatile ("dp4a.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(unsigned int srcA, unsigned int srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp4a.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ int __dp4a(char4 srcA, char4 srcB, int c) { + int ret; + asm volatile ("dp4a.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(uchar4 srcA, uchar4 srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp4a.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c)); + return ret; +} + +// 2a.lo +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(int srcA, int srcB, int c) { + int ret; + asm volatile ("dp2a.lo.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(unsigned int srcA, unsigned int srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.lo.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(short2 srcA, char4 srcB, int c) { + int ret; + asm volatile ("dp2a.lo.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(ushort2 srcA, uchar4 srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.lo.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c)); + return ret; +} + +// 2a.hi +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(int srcA, int srcB, int c) { + int ret; + asm volatile ("dp2a.hi.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(unsigned int srcA, unsigned int srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.hi.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(short2 srcA, char4 srcB, int c) { + int ret; + asm volatile ("dp2a.hi.s32.s32 
%0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(ushort2 srcA, uchar4 srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.hi.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c)); + return ret; +} + + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 610 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_61_INTRINSICS_DECL__ + +#endif /* !__SM_61_INTRINSICS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..2fb940c1d2bd5ee7b4a5020e12297bc2927e0386 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h @@ -0,0 +1,124 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SURFACE_FUNCTIONS_H__) +#define __SURFACE_FUNCTIONS_H__ + + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" +#include "cuda_surface_types.h" + +#if defined(_WIN32) +# define __DEPRECATED__ __declspec(deprecated) +#else +# define __DEPRECATED__ __attribute__((deprecated)) +#endif + +template struct __nv_surf_trait { typedef void * cast_type; }; + +template<> struct __nv_surf_trait { typedef char * cast_type; }; +template<> struct __nv_surf_trait { typedef signed char * cast_type; }; +template<> struct __nv_surf_trait { typedef unsigned char * cast_type; }; +template<> struct __nv_surf_trait { typedef char1 * cast_type; }; +template<> struct __nv_surf_trait { typedef uchar1 * cast_type; }; +template<> struct __nv_surf_trait { typedef char2 * cast_type; }; +template<> struct __nv_surf_trait { typedef uchar2 * cast_type; }; +template<> struct __nv_surf_trait { typedef char4 * cast_type; }; +template<> struct __nv_surf_trait { typedef uchar4 * cast_type; }; +template<> struct __nv_surf_trait { typedef short * cast_type; }; +template<> struct __nv_surf_trait { typedef unsigned short * cast_type; }; +template<> struct __nv_surf_trait { typedef short1 * cast_type; }; +template<> struct __nv_surf_trait { typedef ushort1 * cast_type; }; +template<> struct __nv_surf_trait { typedef short2 * cast_type; }; +template<> struct __nv_surf_trait { typedef ushort2 * cast_type; }; +template<> struct __nv_surf_trait { typedef short4 * cast_type; }; +template<> struct __nv_surf_trait { typedef ushort4 * cast_type; }; +template<> struct __nv_surf_trait { typedef int * cast_type; }; +template<> struct __nv_surf_trait { typedef unsigned int * cast_type; }; +template<> struct __nv_surf_trait { typedef int1 * cast_type; }; +template<> struct __nv_surf_trait { typedef uint1 * cast_type; }; +template<> struct __nv_surf_trait { typedef int2 * cast_type; }; +template<> struct __nv_surf_trait { typedef uint2 * cast_type; }; +template<> struct __nv_surf_trait { typedef int4 * cast_type; }; +template<> struct __nv_surf_trait { typedef uint4 * cast_type; }; +template<> struct __nv_surf_trait { typedef long long * cast_type; }; +template<> struct __nv_surf_trait { typedef unsigned long long * cast_type; }; +template<> struct __nv_surf_trait { typedef longlong1 * cast_type; }; +template<> struct __nv_surf_trait { typedef ulonglong1 * cast_type; }; +template<> struct __nv_surf_trait { typedef longlong2 * cast_type; }; +template<> struct __nv_surf_trait { typedef ulonglong2 * cast_type; }; +#if !defined(__LP64__) +template<> struct __nv_surf_trait { typedef int * cast_type; }; +template<> struct __nv_surf_trait { typedef unsigned int * cast_type; }; +template<> struct __nv_surf_trait { typedef int1 * cast_type; }; +template<> struct __nv_surf_trait { typedef uint1 * cast_type; }; +template<> struct __nv_surf_trait { typedef int2 * cast_type; }; +template<> struct __nv_surf_trait { typedef uint2 * cast_type; }; +template<> struct __nv_surf_trait { typedef uint4 * cast_type; }; +template<> struct __nv_surf_trait { typedef int4 * cast_type; }; +#endif +template<> struct __nv_surf_trait { typedef float * cast_type; }; +template<> struct __nv_surf_trait { typedef float1 * cast_type; }; +template<> struct __nv_surf_trait { typedef float2 * cast_type; }; +template<> struct __nv_surf_trait { typedef 
float4 * cast_type; }; + + +#undef __DEPRECATED__ + + +#endif /* __cplusplus && __CUDACC__ */ +#endif /* !__SURFACE_FUNCTIONS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..a93faf052f98d81f8cb65bd9591d08ec90c994d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h @@ -0,0 +1,243 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + + +#ifndef __SURFACE_INDIRECT_FUNCTIONS_H__ +#define __SURFACE_INDIRECT_FUNCTIONS_H__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#include "cuda_runtime_api.h" + +template struct __nv_isurf_trait { }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; + +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; + +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; + + +template +static __device__ typename __nv_isurf_trait::type surf1Dread(T *ptr, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1Dread", ptr, obj, x, mode); +} + +template +static __device__ T surf1Dread(cudaSurfaceObject_t surfObject, int x, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf1Dread(&ret, surfObject, x, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf2Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2Dread", ptr, obj, x, y, mode); +} + +template +static __device__ T surf2Dread(cudaSurfaceObject_t surfObject, int x, int y, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf2Dread(&ret, surfObject, x, y, boundaryMode); + return ret; +} + + +template +static __device__ typename __nv_isurf_trait::type surf3Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf3Dread", ptr, obj, x, y, z, mode); +} + +template +static __device__ T 
surf3Dread(cudaSurfaceObject_t surfObject, int x, int y, int z, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf3Dread(&ret, surfObject, x, y, z, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf1DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1DLayeredread", ptr, obj, x, layer, mode); +} + +template +static __device__ T surf1DLayeredread(cudaSurfaceObject_t surfObject, int x, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf1DLayeredread(&ret, surfObject, x, layer, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf2DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2DLayeredread", ptr, obj, x, y, layer, mode); +} + +template +static __device__ T surf2DLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf2DLayeredread(&ret, surfObject, x, y, layer, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapread", ptr, obj, x, y, face, mode); +} + +template +static __device__ T surfCubemapread(cudaSurfaceObject_t surfObject, int x, int y, int face, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surfCubemapread(&ret, surfObject, x, y, face, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapLayeredread", ptr, obj, x, y, layerface, mode); +} + +template +static __device__ T surfCubemapLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layerface, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surfCubemapLayeredread(&ret, surfObject, x, y, layerface, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf1Dwrite(T val, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1Dwrite_v2", &val, obj, x, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf2Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2Dwrite_v2", &val, obj, x, y, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf3Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf3Dwrite_v2", &val, obj, x, y, z, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf1DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1DLayeredwrite_v2", &val, obj, x, layer, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf2DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode 
mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2DLayeredwrite_v2", &val, obj, x, y, layer, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapwrite(T val, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapwrite_v2", &val, obj, x, y, face, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapLayeredwrite_v2", &val, obj, x, y, layerface, mode); +} + +#endif // __cplusplus && __CUDACC__ + +#endif // __SURFACE_INDIRECT_FUNCTIONS_H__ + + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_indirect_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_indirect_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..1e5537d87294ee78ecec567893a6aaec333db317 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_indirect_functions.h @@ -0,0 +1,638 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + +#ifndef __TEXTURE_INDIRECT_FUNCTIONS_H__ +#define __TEXTURE_INDIRECT_FUNCTIONS_H__ + + +#if defined(__cplusplus) && defined(__CUDACC__) + + +#include "cuda_runtime_api.h" + + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600) +#define __NV_TEX_SPARSE 1 +#endif /* endif */ + +template struct __nv_itex_trait { }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +#if !defined(__LP64__) +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +#endif /* !__LP64__ */ +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; +template<> struct __nv_itex_trait { typedef void type; }; + + + +template +static __device__ typename __nv_itex_trait::type tex1Dfetch(T *ptr, cudaTextureObject_t obj, int x) +{ + __nv_tex_surf_handler("__itex1Dfetch", ptr, obj, x); +} + +template +static __device__ T tex1Dfetch(cudaTextureObject_t texObject, int x) +{ + T ret; + tex1Dfetch(&ret, texObject, x); + return ret; +} + +template +static __device__ typename __nv_itex_trait::type tex1D(T *ptr, cudaTextureObject_t obj, float x) +{ + __nv_tex_surf_handler("__itex1D", ptr, obj, x); +} + + +template +static __device__ T tex1D(cudaTextureObject_t texObject, float x) +{ + T ret; + tex1D(&ret, texObject, x); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type tex2D(T *ptr, cudaTextureObject_t obj, float x, 
float y) +{ + __nv_tex_surf_handler("__itex2D", ptr, obj, x, y); +} + +template +static __device__ T tex2D(cudaTextureObject_t texObject, float x, float y) +{ + T ret; + tex2D(&ret, texObject, x, y); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex2D(T *ptr, cudaTextureObject_t obj, float x, float y, + bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2D_sparse", ptr, obj, x, y, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2D(cudaTextureObject_t texObject, float x, float y, bool* isResident) +{ + T ret; + tex2D(&ret, texObject, x, y, isResident); + return ret; +} + +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type tex3D(T *ptr, cudaTextureObject_t obj, float x, float y, float z) +{ + __nv_tex_surf_handler("__itex3D", ptr, obj, x, y, z); +} + +template +static __device__ T tex3D(cudaTextureObject_t texObject, float x, float y, float z) +{ + T ret; + tex3D(&ret, texObject, x, y, z); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex3D(T *ptr, cudaTextureObject_t obj, float x, float y, float z, + bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex3D_sparse", ptr, obj, x, y, z, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex3D(cudaTextureObject_t texObject, float x, float y, float z, bool* isResident) +{ + T ret; + tex3D(&ret, texObject, x, y, z, isResident); + return ret; +} +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type tex1DLayered(T *ptr, cudaTextureObject_t obj, float x, int layer) +{ + __nv_tex_surf_handler("__itex1DLayered", ptr, obj, x, layer); +} + +template +static __device__ T tex1DLayered(cudaTextureObject_t texObject, float x, int layer) +{ + T ret; + tex1DLayered(&ret, texObject, x, layer); + return ret; +} + +template +static __device__ typename __nv_itex_trait::type tex2DLayered(T *ptr, cudaTextureObject_t obj, float x, float y, int layer) +{ + __nv_tex_surf_handler("__itex2DLayered", ptr, obj, x, y, layer); +} + +template +static __device__ T tex2DLayered(cudaTextureObject_t texObject, float x, float y, int layer) +{ + T ret; + tex2DLayered(&ret, texObject, x, y, layer); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex2DLayered(T *ptr, cudaTextureObject_t obj, float x, float y, int layer, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2DLayered_sparse", ptr, obj, x, y, layer, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2DLayered(cudaTextureObject_t texObject, float x, float y, int layer, bool* isResident) +{ + T ret; + tex2DLayered(&ret, texObject, x, y, layer, isResident); + return ret; +} +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type texCubemap(T *ptr, cudaTextureObject_t obj, float x, float y, float z) +{ + __nv_tex_surf_handler("__itexCubemap", ptr, obj, x, y, z); +} + + +template +static __device__ T texCubemap(cudaTextureObject_t texObject, float x, float y, float z) +{ + T ret; + texCubemap(&ret, texObject, x, y, z); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type texCubemapLayered(T *ptr, cudaTextureObject_t obj, float x, float y, float z, int layer) +{ + __nv_tex_surf_handler("__itexCubemapLayered", ptr, obj, x, y, z, layer); +} + +template +static __device__ T 
texCubemapLayered(cudaTextureObject_t texObject, float x, float y, float z, int layer) +{ + T ret; + texCubemapLayered(&ret, texObject, x, y, z, layer); + return ret; +} + +template +static __device__ typename __nv_itex_trait::type tex2Dgather(T *ptr, cudaTextureObject_t obj, float x, float y, int comp = 0) +{ + __nv_tex_surf_handler("__itex2Dgather", ptr, obj, x, y, comp); +} + +template +static __device__ T tex2Dgather(cudaTextureObject_t to, float x, float y, int comp = 0) +{ + T ret; + tex2Dgather(&ret, to, x, y, comp); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex2Dgather(T *ptr, cudaTextureObject_t obj, float x, float y, bool* isResident, int comp = 0) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2Dgather_sparse", ptr, obj, x, y, comp, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2Dgather(cudaTextureObject_t to, float x, float y, bool* isResident, int comp = 0) +{ + T ret; + tex2Dgather(&ret, to, x, y, isResident, comp); + return ret; +} + +#endif /* __NV_TEX_SPARSE */ + +template +static __device__ typename __nv_itex_trait::type tex1DLod(T *ptr, cudaTextureObject_t obj, float x, float level) +{ + __nv_tex_surf_handler("__itex1DLod", ptr, obj, x, level); +} + +template +static __device__ T tex1DLod(cudaTextureObject_t texObject, float x, float level) +{ + T ret; + tex1DLod(&ret, texObject, x, level); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type tex2DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float level) +{ + __nv_tex_surf_handler("__itex2DLod", ptr, obj, x, y, level); +} + +template +static __device__ T tex2DLod(cudaTextureObject_t texObject, float x, float y, float level) +{ + T ret; + tex2DLod(&ret, texObject, x, y, level); + return ret; +} + +#if __NV_TEX_SPARSE + +template +static __device__ typename __nv_itex_trait::type tex2DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float level, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2DLod_sparse", ptr, obj, x, y, level, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2DLod(cudaTextureObject_t texObject, float x, float y, float level, bool* isResident) +{ + T ret; + tex2DLod(&ret, texObject, x, y, level, isResident); + return ret; +} + +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type tex3DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float level) +{ + __nv_tex_surf_handler("__itex3DLod", ptr, obj, x, y, z, level); +} + +template +static __device__ T tex3DLod(cudaTextureObject_t texObject, float x, float y, float z, float level) +{ + T ret; + tex3DLod(&ret, texObject, x, y, z, level); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex3DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float level, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex3DLod_sparse", ptr, obj, x, y, z, level, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex3DLod(cudaTextureObject_t texObject, float x, float y, float z, float level, bool* isResident) +{ + T ret; + tex3DLod(&ret, texObject, x, y, z, level, isResident); + return ret; +} + +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type tex1DLayeredLod(T *ptr, cudaTextureObject_t obj, float x, int layer, float level) +{ + __nv_tex_surf_handler("__itex1DLayeredLod", ptr, obj, x, layer, 
level); +} + +template +static __device__ T tex1DLayeredLod(cudaTextureObject_t texObject, float x, int layer, float level) +{ + T ret; + tex1DLayeredLod(&ret, texObject, x, layer, level); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type tex2DLayeredLod(T *ptr, cudaTextureObject_t obj, float x, float y, int layer, float level) +{ + __nv_tex_surf_handler("__itex2DLayeredLod", ptr, obj, x, y, layer, level); +} + +template +static __device__ T tex2DLayeredLod(cudaTextureObject_t texObject, float x, float y, int layer, float level) +{ + T ret; + tex2DLayeredLod(&ret, texObject, x, y, layer, level); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex2DLayeredLod(T *ptr, cudaTextureObject_t obj, float x, float y, int layer, float level, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2DLayeredLod_sparse", ptr, obj, x, y, layer, level, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2DLayeredLod(cudaTextureObject_t texObject, float x, float y, int layer, float level, bool* isResident) +{ + T ret; + tex2DLayeredLod(&ret, texObject, x, y, layer, level, isResident); + return ret; +} +#endif /* __NV_TEX_SPARSE */ + +template +static __device__ typename __nv_itex_trait::type texCubemapLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float level) +{ + __nv_tex_surf_handler("__itexCubemapLod", ptr, obj, x, y, z, level); +} + +template +static __device__ T texCubemapLod(cudaTextureObject_t texObject, float x, float y, float z, float level) +{ + T ret; + texCubemapLod(&ret, texObject, x, y, z, level); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type texCubemapGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float4 dPdx, float4 dPdy) +{ + __nv_tex_surf_handler("__itexCubemapGrad_v2", ptr, obj, x, y, z, &dPdx, &dPdy); +} + +template +static __device__ T texCubemapGrad(cudaTextureObject_t texObject, float x, float y, float z, float4 dPdx, float4 dPdy) +{ + T ret; + texCubemapGrad(&ret, texObject, x, y, z, dPdx, dPdy); + return ret; +} + +template +static __device__ typename __nv_itex_trait::type texCubemapLayeredLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, int layer, float level) +{ + __nv_tex_surf_handler("__itexCubemapLayeredLod", ptr, obj, x, y, z, layer, level); +} + +template +static __device__ T texCubemapLayeredLod(cudaTextureObject_t texObject, float x, float y, float z, int layer, float level) +{ + T ret; + texCubemapLayeredLod(&ret, texObject, x, y, z, layer, level); + return ret; +} + +template +static __device__ typename __nv_itex_trait::type tex1DGrad(T *ptr, cudaTextureObject_t obj, float x, float dPdx, float dPdy) +{ + __nv_tex_surf_handler("__itex1DGrad", ptr, obj, x, dPdx, dPdy); +} + +template +static __device__ T tex1DGrad(cudaTextureObject_t texObject, float x, float dPdx, float dPdy) +{ + T ret; + tex1DGrad(&ret, texObject, x, dPdx, dPdy); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type tex2DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float2 dPdx, float2 dPdy) +{ + __nv_tex_surf_handler("__itex2DGrad_v2", ptr, obj, x, y, &dPdx, &dPdy); +} + +template +static __device__ T tex2DGrad(cudaTextureObject_t texObject, float x, float y, float2 dPdx, float2 dPdy) +{ + T ret; + tex2DGrad(&ret, texObject, x, y, dPdx, dPdy); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex2DGrad(T *ptr, 
cudaTextureObject_t obj, float x, float y, float2 dPdx, float2 dPdy, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2DGrad_sparse", ptr, obj, x, y, &dPdx, &dPdy, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2DGrad(cudaTextureObject_t texObject, float x, float y, float2 dPdx, float2 dPdy, bool* isResident) +{ + T ret; + tex2DGrad(&ret, texObject, x, y, dPdx, dPdy, isResident); + return ret; +} +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type tex3DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float4 dPdx, float4 dPdy) +{ + __nv_tex_surf_handler("__itex3DGrad_v2", ptr, obj, x, y, z, &dPdx, &dPdy); +} + +template +static __device__ T tex3DGrad(cudaTextureObject_t texObject, float x, float y, float z, float4 dPdx, float4 dPdy) +{ + T ret; + tex3DGrad(&ret, texObject, x, y, z, dPdx, dPdy); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex3DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float4 dPdx, float4 dPdy, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex3DGrad_sparse", ptr, obj, x, y, z, &dPdx, &dPdy, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex3DGrad(cudaTextureObject_t texObject, float x, float y, float z, float4 dPdx, float4 dPdy, bool* isResident) +{ + T ret; + tex3DGrad(&ret, texObject, x, y, z, dPdx, dPdy, isResident); + return ret; +} + +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type tex1DLayeredGrad(T *ptr, cudaTextureObject_t obj, float x, int layer, float dPdx, float dPdy) +{ + __nv_tex_surf_handler("__itex1DLayeredGrad", ptr, obj, x, layer, dPdx, dPdy); +} + +template +static __device__ T tex1DLayeredGrad(cudaTextureObject_t texObject, float x, int layer, float dPdx, float dPdy) +{ + T ret; + tex1DLayeredGrad(&ret, texObject, x, layer, dPdx, dPdy); + return ret; +} + + +template +static __device__ typename __nv_itex_trait::type tex2DLayeredGrad(T * ptr, cudaTextureObject_t obj, float x, float y, int layer, float2 dPdx, float2 dPdy) +{ + __nv_tex_surf_handler("__itex2DLayeredGrad_v2", ptr, obj, x, y, layer, &dPdx, &dPdy); +} + +template +static __device__ T tex2DLayeredGrad(cudaTextureObject_t texObject, float x, float y, int layer, float2 dPdx, float2 dPdy) +{ + T ret; + tex2DLayeredGrad(&ret, texObject, x, y, layer, dPdx, dPdy); + return ret; +} + +#if __NV_TEX_SPARSE +template +static __device__ typename __nv_itex_trait::type tex2DLayeredGrad(T * ptr, cudaTextureObject_t obj, float x, float y, int layer, float2 dPdx, float2 dPdy, bool* isResident) +{ + unsigned char res; + __nv_tex_surf_handler("__itex2DLayeredGrad_sparse", ptr, obj, x, y, layer, &dPdx, &dPdy, &res); + *isResident = (res != 0); +} + +template +static __device__ T tex2DLayeredGrad(cudaTextureObject_t texObject, float x, float y, int layer, float2 dPdx, float2 dPdy, bool* isResident) +{ + T ret; + tex2DLayeredGrad(&ret, texObject, x, y, layer, dPdx, dPdy, isResident); + return ret; +} +#endif /* __NV_TEX_SPARSE */ + + +template +static __device__ typename __nv_itex_trait::type texCubemapLayeredGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, int layer, float4 dPdx, float4 dPdy) +{ + __nv_tex_surf_handler("__itexCubemapLayeredGrad_v2", ptr, obj, x, y, z, layer, &dPdx, &dPdy); +} + +template +static __device__ T texCubemapLayeredGrad(cudaTextureObject_t texObject, float x, float y, float z, int layer, float4 
dPdx, float4 dPdy) +{ + T ret; + texCubemapLayeredGrad(&ret, texObject, x, y, z, layer, dPdx, dPdy); + return ret; +} + +#undef __NV_TEX_SPARSE + +#endif // __cplusplus && __CUDACC__ +#endif // __TEXTURE_INDIRECT_FUNCTIONS_H__ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h new file mode 100644 index 0000000000000000000000000000000000000000..5289dc6bbcab9f08bfad11e0a3dde1acca6adde4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h @@ -0,0 +1,177 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__TEXTURE_TYPES_H__) +#define __TEXTURE_TYPES_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "driver_types.h" + +/** + * \addtogroup CUDART_TYPES + * + * @{ + */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#define cudaTextureType1D 0x01 +#define cudaTextureType2D 0x02 +#define cudaTextureType3D 0x03 +#define cudaTextureTypeCubemap 0x0C +#define cudaTextureType1DLayered 0xF1 +#define cudaTextureType2DLayered 0xF2 +#define cudaTextureTypeCubemapLayered 0xFC + +/** + * CUDA texture address modes + */ +enum __device_builtin__ cudaTextureAddressMode +{ + cudaAddressModeWrap = 0, /**< Wrapping address mode */ + cudaAddressModeClamp = 1, /**< Clamp to edge address mode */ + cudaAddressModeMirror = 2, /**< Mirror address mode */ + cudaAddressModeBorder = 3 /**< Border address mode */ +}; + +/** + * CUDA texture filter modes + */ +enum __device_builtin__ cudaTextureFilterMode +{ + cudaFilterModePoint = 0, /**< Point filter mode */ + cudaFilterModeLinear = 1 /**< Linear filter mode */ +}; + +/** + * CUDA texture read modes + */ +enum __device_builtin__ cudaTextureReadMode +{ + cudaReadModeElementType = 0, /**< Read texture as specified element type */ + cudaReadModeNormalizedFloat = 1 /**< Read texture as normalized float */ +}; + +/** + * CUDA texture descriptor + */ +struct __device_builtin__ cudaTextureDesc +{ + /** + * Texture address mode for up to 3 dimensions + */ + enum cudaTextureAddressMode addressMode[3]; + /** + * Texture filter mode + */ + enum cudaTextureFilterMode filterMode; + /** + * Texture read mode + */ + enum cudaTextureReadMode readMode; + /** + * Perform sRGB->linear conversion during texture read + */ + int sRGB; + /** + * Texture Border Color + */ + float borderColor[4]; + /** + * Indicates whether texture reads are normalized or not + */ + int normalizedCoords; + /** + * Limit to the anisotropy ratio + */ + unsigned int maxAnisotropy; + /** + * Mipmap filter mode + */ + enum cudaTextureFilterMode mipmapFilterMode; + /** + * Offset applied to the supplied mipmap level + */ + float mipmapLevelBias; + /** + * Lower end of the mipmap level range to clamp access to + */ + float minMipmapLevelClamp; + /** + * Upper end of the mipmap level range to clamp access to + */ + float maxMipmapLevelClamp; + /** + * Disable any trilinear filtering optimizations. + */ + int disableTrilinearOptimization; + /** + * Enable seamless cube map filtering. + */ + int seamlessCubemap; +}; + +/** + * An opaque value that represents a CUDA texture object + */ +typedef __device_builtin__ unsigned long long cudaTextureObject_t; + +/** @} */ +/** @} */ /* END CUDART_TYPES */ + +#endif /* !__TEXTURE_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..bee6cd32c36d94bde65aad1c867352493d07a0dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h @@ -0,0 +1,175 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__VECTOR_FUNCTIONS_H__) +#define __VECTOR_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDACC_RTC__) +#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x); + +__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x); + +__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y); + +__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y); + +__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z); + +__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z); + +__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w); + +__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w); + +__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x); + +__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x); + +__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y); + +__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y); + +__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x,short y, short z); + +__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z); + +__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w); + +__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w); + +__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x); + +__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x); + +__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y); + +__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y); + +__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z); + +__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z); + +__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w); + +__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w); + +__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x); + +__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x); + +__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y); + +__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y); + +__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z); + +__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z); + +__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w); + +__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w); + +__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x); + +__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y); + +__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float 
x, float y, float z); + +__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w); + +__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x); + +__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x); + +__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y); + +__VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y); + +__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z); + +__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z); + +__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w); + +__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w); + +__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x); + +__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y); + +__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z); + +__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w); + +#undef __VECTOR_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) +#include "vector_functions.hpp" +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__VECTOR_FUNCTIONS_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ab69cf38045a7c44dae67e7149d49ac4c6148747 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp @@ -0,0 +1,316 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__VECTOR_FUNCTIONS_HPP__) +#define __VECTOR_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDACC_RTC__) +#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x) +{ + char1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x) +{ + uchar1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y) +{ + char2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y) +{ + uchar2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z) +{ + char3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z) +{ + uchar3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w) +{ + char4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w) +{ + uchar4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x) +{ + short1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x) +{ + ushort1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y) +{ + short2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y) +{ + ushort2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short 
x,short y, short z) +{ + short3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z) +{ + ushort3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w) +{ + short4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w) +{ + ushort4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x) +{ + int1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x) +{ + uint1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y) +{ + int2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y) +{ + uint2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z) +{ + int3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z) +{ + uint3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w) +{ + int4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w) +{ + uint4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x) +{ + long1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x) +{ + ulong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y) +{ + long2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y) +{ + ulong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z) +{ + long3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z) +{ + ulong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w) +{ + long4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w) +{ + ulong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x) +{ + float1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y) +{ + float2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z) +{ + float3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w) +{ + float4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x) +{ + longlong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x) +{ + ulonglong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y) +{ + longlong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong2 
make_ulonglong2(unsigned long long int x, unsigned long long int y) +{ + ulonglong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z) +{ + longlong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z) +{ + ulonglong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w) +{ + longlong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w) +{ + ulonglong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x) +{ + double1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y) +{ + double2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z) +{ + double3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w) +{ + double4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +#undef __VECTOR_FUNCTIONS_DECL__ + +#endif /* !__VECTOR_FUNCTIONS_HPP__ */ + diff --git a/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_types.h b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4cfabcff8a25adaf3f589d38d531bc63cae2fcf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_types.h @@ -0,0 +1,443 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__VECTOR_TYPES_H__) +#define __VECTOR_TYPES_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__ +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#ifndef __DOXYGEN_ONLY__ +#include "crt/host_defines.h" +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC__) && !defined(__CUDACC_RTC__) && \ + defined(_WIN32) && !defined(_WIN64) + +#pragma warning(push) +#pragma warning(disable: 4201 4408) + +#define __cuda_builtin_vector_align8(tag, members) \ +struct __device_builtin__ tag \ +{ \ + union \ + { \ + struct { members }; \ + struct { long long int :1,:0; }; \ + }; \ +} + +#else /* !__CUDACC__ && !__CUDACC_RTC__ && _WIN32 && !_WIN64 */ + +#define __cuda_builtin_vector_align8(tag, members) \ +struct __device_builtin__ __align__(8) tag \ +{ \ + members \ +} + +#endif /* !__CUDACC__ && !__CUDACC_RTC__ && _WIN32 && !_WIN64 */ + +struct __device_builtin__ char1 +{ + signed char x; +}; + +struct __device_builtin__ uchar1 +{ + unsigned char x; +}; + + +struct __device_builtin__ __align__(2) char2 +{ + signed char x, y; +}; + +struct __device_builtin__ __align__(2) uchar2 +{ + unsigned char x, y; +}; + +struct __device_builtin__ char3 +{ + signed char x, y, z; +}; + +struct __device_builtin__ uchar3 +{ + unsigned char x, y, z; +}; + +struct __device_builtin__ __align__(4) char4 +{ + signed char x, y, z, w; +}; + +struct __device_builtin__ __align__(4) uchar4 +{ + unsigned char x, y, z, w; +}; + +struct __device_builtin__ short1 +{ + short x; +}; + +struct __device_builtin__ ushort1 +{ + unsigned short x; +}; + +struct __device_builtin__ __align__(4) short2 +{ + short x, y; +}; + +struct __device_builtin__ __align__(4) ushort2 +{ + unsigned short x, y; +}; + +struct __device_builtin__ short3 +{ + short x, y, z; +}; + +struct __device_builtin__ ushort3 +{ + unsigned short x, y, z; +}; + 
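(Editorial aside, not part of the vendored header.) The __cuda_builtin_vector_align8 helper defined above is what produces the 8-byte-aligned two- and four-component builtin types used next: on every branch except the 32-bit MSVC host path it simply wraps the supplied member list in an __align__(8) __device_builtin__ struct. As a hedged illustration, the short4 invocation that follows expands to roughly:

/* Approximate expansion (illustration only) of
 *   __cuda_builtin_vector_align8(short4, short x; short y; short z; short w;)
 * on the non-MSVC-32-bit branch of the macro defined above.
 */
struct __device_builtin__ __align__(8) short4
{
    short x; short y; short z; short w;
};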
+__cuda_builtin_vector_align8(short4, short x; short y; short z; short w;); +__cuda_builtin_vector_align8(ushort4, unsigned short x; unsigned short y; unsigned short z; unsigned short w;); + +struct __device_builtin__ int1 +{ + int x; +}; + +struct __device_builtin__ uint1 +{ + unsigned int x; +}; + +__cuda_builtin_vector_align8(int2, int x; int y;); +__cuda_builtin_vector_align8(uint2, unsigned int x; unsigned int y;); + +struct __device_builtin__ int3 +{ + int x, y, z; +}; + +struct __device_builtin__ uint3 +{ + unsigned int x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) int4 +{ + int x, y, z, w; +}; + +struct __device_builtin__ __builtin_align__(16) uint4 +{ + unsigned int x, y, z, w; +}; + +struct __device_builtin__ long1 +{ + long int x; +}; + +struct __device_builtin__ ulong1 +{ + unsigned long x; +}; + +#if defined(_WIN32) +__cuda_builtin_vector_align8(long2, long int x; long int y;); +__cuda_builtin_vector_align8(ulong2, unsigned long int x; unsigned long int y;); +#else /* !_WIN32 */ + +struct __device_builtin__ __align__(2*sizeof(long int)) long2 +{ + long int x, y; +}; + +struct __device_builtin__ __align__(2*sizeof(unsigned long int)) ulong2 +{ + unsigned long int x, y; +}; + +#endif /* _WIN32 */ + +struct __device_builtin__ long3 +{ + long int x, y, z; +}; + +struct __device_builtin__ ulong3 +{ + unsigned long int x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) long4 +{ + long int x, y, z, w; +}; + +struct __device_builtin__ __builtin_align__(16) ulong4 +{ + unsigned long int x, y, z, w; +}; + +struct __device_builtin__ float1 +{ + float x; +}; + +#if !defined(__CUDACC__) && defined(__arm__) && \ + defined(__ARM_PCS_VFP) && __GNUC__ == 4 && __GNUC_MINOR__ == 6 + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-pedantic" + +struct __device_builtin__ __attribute__((aligned(8))) float2 +{ + float x; float y; float __cuda_gnu_arm_ice_workaround[0]; +}; + +#pragma GCC poison __cuda_gnu_arm_ice_workaround +#pragma GCC diagnostic pop + +#else /* !__CUDACC__ && __arm__ && __ARM_PCS_VFP && + __GNUC__ == 4&& __GNUC_MINOR__ == 6 */ + +__cuda_builtin_vector_align8(float2, float x; float y;); + +#endif /* !__CUDACC__ && __arm__ && __ARM_PCS_VFP && + __GNUC__ == 4&& __GNUC_MINOR__ == 6 */ + +struct __device_builtin__ float3 +{ + float x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) float4 +{ + float x, y, z, w; +}; + +struct __device_builtin__ longlong1 +{ + long long int x; +}; + +struct __device_builtin__ ulonglong1 +{ + unsigned long long int x; +}; + +struct __device_builtin__ __builtin_align__(16) longlong2 +{ + long long int x, y; +}; + +struct __device_builtin__ __builtin_align__(16) ulonglong2 +{ + unsigned long long int x, y; +}; + +struct __device_builtin__ longlong3 +{ + long long int x, y, z; +}; + +struct __device_builtin__ ulonglong3 +{ + unsigned long long int x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) longlong4 +{ + long long int x, y, z ,w; +}; + +struct __device_builtin__ __builtin_align__(16) ulonglong4 +{ + unsigned long long int x, y, z, w; +}; + +struct __device_builtin__ double1 +{ + double x; +}; + +struct __device_builtin__ __builtin_align__(16) double2 +{ + double x, y; +}; + +struct __device_builtin__ double3 +{ + double x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) double4 +{ + double x, y, z, w; +}; + +#if !defined(__CUDACC__) && defined(_WIN32) && !defined(_WIN64) + +#pragma warning(pop) + +#endif /* !__CUDACC__ && _WIN32 && !_WIN64 */ + 
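(Editorial aside, hedged sketch, not part of the vendored header.) To connect the struct definitions above with the make_* constructors declared in vector_functions.h earlier in this diff: float4 is declared with __builtin_align__(16), so a device thread can move one element per 128-bit memory transaction, and make_float4 is usable from both host and device code. The kernel below is an illustrative assumption only; the name scale_float4 and its buffers are not part of any NVIDIA header.

#include <cuda_runtime.h>  /* brings in vector_types.h and vector_functions.h */

/* Hypothetical example kernel: scales each float4 component-wise by s. */
__global__ void scale_float4(const float4 *in, float4 *out, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float4 v = in[i];  /* 16-byte aligned load, typically a single vectorized access */
        out[i] = make_float4(v.x * s, v.y * s, v.z * s, v.w * s);
    }
}

A host would launch it as, for example, scale_float4<<<(n + 255) / 256, 256>>>(d_in, d_out, 2.0f, n); the explicit 16-byte alignment of float4 is what makes the single-load pattern safe.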
+/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +typedef __device_builtin__ struct char1 char1; +typedef __device_builtin__ struct uchar1 uchar1; +typedef __device_builtin__ struct char2 char2; +typedef __device_builtin__ struct uchar2 uchar2; +typedef __device_builtin__ struct char3 char3; +typedef __device_builtin__ struct uchar3 uchar3; +typedef __device_builtin__ struct char4 char4; +typedef __device_builtin__ struct uchar4 uchar4; +typedef __device_builtin__ struct short1 short1; +typedef __device_builtin__ struct ushort1 ushort1; +typedef __device_builtin__ struct short2 short2; +typedef __device_builtin__ struct ushort2 ushort2; +typedef __device_builtin__ struct short3 short3; +typedef __device_builtin__ struct ushort3 ushort3; +typedef __device_builtin__ struct short4 short4; +typedef __device_builtin__ struct ushort4 ushort4; +typedef __device_builtin__ struct int1 int1; +typedef __device_builtin__ struct uint1 uint1; +typedef __device_builtin__ struct int2 int2; +typedef __device_builtin__ struct uint2 uint2; +typedef __device_builtin__ struct int3 int3; +typedef __device_builtin__ struct uint3 uint3; +typedef __device_builtin__ struct int4 int4; +typedef __device_builtin__ struct uint4 uint4; +typedef __device_builtin__ struct long1 long1; +typedef __device_builtin__ struct ulong1 ulong1; +typedef __device_builtin__ struct long2 long2; +typedef __device_builtin__ struct ulong2 ulong2; +typedef __device_builtin__ struct long3 long3; +typedef __device_builtin__ struct ulong3 ulong3; +typedef __device_builtin__ struct long4 long4; +typedef __device_builtin__ struct ulong4 ulong4; +typedef __device_builtin__ struct float1 float1; +typedef __device_builtin__ struct float2 float2; +typedef __device_builtin__ struct float3 float3; +typedef __device_builtin__ struct float4 float4; +typedef __device_builtin__ struct longlong1 longlong1; +typedef __device_builtin__ struct ulonglong1 ulonglong1; +typedef __device_builtin__ struct longlong2 longlong2; +typedef __device_builtin__ struct ulonglong2 ulonglong2; +typedef __device_builtin__ struct longlong3 longlong3; +typedef __device_builtin__ struct ulonglong3 ulonglong3; +typedef __device_builtin__ struct longlong4 longlong4; +typedef __device_builtin__ struct ulonglong4 ulonglong4; +typedef __device_builtin__ struct double1 double1; +typedef __device_builtin__ struct double2 double2; +typedef __device_builtin__ struct double3 double3; +typedef __device_builtin__ struct double4 double4; + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +struct __device_builtin__ dim3 +{ + unsigned int x, y, z; +#if defined(__cplusplus) +#if __cplusplus >= 201103L + __host__ __device__ constexpr dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {} + __host__ __device__ constexpr dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {} + __host__ __device__ constexpr operator uint3(void) const { return uint3{x, y, z}; } +#else + __host__ __device__ dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {} + __host__ __device__ dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {} + __host__ __device__ operator uint3(void) const { uint3 t; t.x = x; t.y = y; t.z = z; return t; } +#endif +#endif /* __cplusplus */ +}; + +typedef __device_builtin__ struct 
dim3 dim3; + +#undef __cuda_builtin_vector_align8 + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__ +#endif + +#endif /* !__VECTOR_TYPES_H__ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a478fb62de8ec00653ece28440734598e0dfbbe0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h new file mode 100644 index 0000000000000000000000000000000000000000..6879af86b214a69138897ac78968149858b54737 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h @@ -0,0 +1,540 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/* cudnn_adv_train : cuDNN's advanced and experimental features.
+
+*/
+
+#if !defined(CUDNN_ADV_TRAIN_H_)
+#define CUDNN_ADV_TRAIN_H_
+
+#include <cuda_runtime.h>
+#include <stdint.h>
+
+#include "cudnn_version.h"
+#include "cudnn_ops_infer.h"
+#include "cudnn_ops_train.h"
+#include "cudnn_adv_infer.h"
+
+/* These version numbers are autogenerated, do not edit manually. */
+#define CUDNN_ADV_TRAIN_MAJOR 8
+#define CUDNN_ADV_TRAIN_MINOR 9
+#define CUDNN_ADV_TRAIN_PATCH 2
+
+#if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \
+    (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL)
+#error Version mismatch in cuDNN ADV TRAIN!!!
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+typedef enum {
+    CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */
+    CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */
+} cudnnWgradMode_t;
+
+CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+cudnnRNNForwardTraining(cudnnHandle_t handle,
+                        const cudnnRNNDescriptor_t rnnDesc,
+                        const int seqLength,
+                        const cudnnTensorDescriptor_t *xDesc,
+                        const void *x,
+                        const cudnnTensorDescriptor_t hxDesc,
+                        const void *hx,
+                        const cudnnTensorDescriptor_t cxDesc,
+                        const void *cx,
+                        const cudnnFilterDescriptor_t wDesc,
+                        const void *w,
+                        const cudnnTensorDescriptor_t *yDesc,
+                        void *y,
+                        const cudnnTensorDescriptor_t hyDesc,
+                        void *hy,
+                        const cudnnTensorDescriptor_t cyDesc,
+                        void *cy,
+                        void *workSpace,
+                        size_t workSpaceSizeInBytes,
+                        void *reserveSpace,
+                        size_t reserveSpaceSizeInBytes);
+
+CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+cudnnRNNBackwardData(cudnnHandle_t handle,
+                     const cudnnRNNDescriptor_t rnnDesc,
+                     const int seqLength,
+                     const cudnnTensorDescriptor_t *yDesc,
+                     const void *y,
+                     const cudnnTensorDescriptor_t *dyDesc,
+                     const void *dy,
+                     const cudnnTensorDescriptor_t dhyDesc,
+                     const void *dhy,
+                     const cudnnTensorDescriptor_t dcyDesc,
+                     const void *dcy,
+                     const cudnnFilterDescriptor_t wDesc,
+                     const void *w,
+                     const cudnnTensorDescriptor_t hxDesc,
+                     const void *hx,
+                     const cudnnTensorDescriptor_t cxDesc,
+                     const void *cx,
+                     const cudnnTensorDescriptor_t *dxDesc,
+                     void *dx,
+                     const cudnnTensorDescriptor_t dhxDesc,
+                     void *dhx,
+                     const cudnnTensorDescriptor_t dcxDesc,
+                     void *dcx,
+                     void *workSpace,
+                     size_t workSpaceSizeInBytes,
+                     void *reserveSpace,
+                     size_t reserveSpaceSizeInBytes);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnRNNBackwardData_v8(cudnnHandle_t handle,
+                        cudnnRNNDescriptor_t rnnDesc,
+                        const int32_t devSeqLengths[],
+                        cudnnRNNDataDescriptor_t yDesc,
+                        const void *y,
+                        const void *dy,
+                        cudnnRNNDataDescriptor_t xDesc,
+                        void *dx,
+                        cudnnTensorDescriptor_t hDesc,
+                        const void *hx,
+                        const void *dhy,
+                        void *dhx,
+                        cudnnTensorDescriptor_t cDesc,
+                        const void *cx,
+                        const void *dcy,
+                        void *dcx,
+                        size_t weightSpaceSize,
+                        const void *weightSpace,
+                        size_t workSpaceSize,
+                        void *workSpace,
+                        size_t reserveSpaceSize,
+                        void *reserveSpace);
+
+CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
+cudnnRNNBackwardWeights(cudnnHandle_t handle,
+                        const cudnnRNNDescriptor_t rnnDesc,
+                        const int seqLength,
+                        const cudnnTensorDescriptor_t *xDesc,
+                        const void *x,
+                        const cudnnTensorDescriptor_t hxDesc,
+                        const void *hx,
+                        const cudnnTensorDescriptor_t *yDesc,
+                        const void *y,
+                        const void *workSpace,
+                        size_t workSpaceSizeInBytes,
+
const cudnnFilterDescriptor_t dwDesc, + void *dw, + const void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnWgradMode_t addGrad, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnTensorDescriptor_t hDesc, + const void *hx, + cudnnRNNDataDescriptor_t yDesc, + const void *y, + size_t weightSpaceSize, + void *dweightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* RNN EX API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardTrainingEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnRNNDataDescriptor_t yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */ + const void *keys, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */ + void *cAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */ + void *iAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */ + void *queries, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardDataEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t yDesc, + const void *y, + const cudnnRNNDataDescriptor_t dyDesc, + const void *dy, + const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */ + const void *dcAttn, /* reserved, should pass NULL */ + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnRNNDataDescriptor_t dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */ + void *dkeys, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeightsEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnRNNDataDescriptor_t yDesc, + const void *y, + void *workSpace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* RNN FIND API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle, + const 
cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const cudnnTensorDescriptor_t *dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnTensorDescriptor_t *dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + const void *workspace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsDQDO[], + const int devSeqLengthsDKDV[], + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + const cudnnSeqDataDescriptor_t dqDesc, + void *dqueries, + const void *queries, + const cudnnSeqDataDescriptor_t dkDesc, + void *dkeys, + const void *keys, + const cudnnSeqDataDescriptor_t dvDesc, + void *dvalues, + const void *values, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnWgradMode_t addGrad, + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const 
cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + size_t weightSizeInBytes, + const void *weights, + void *dweights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* +* CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions +*/ +/* Input normalization mode for loss function */ +typedef enum { + CUDNN_LOSS_NORMALIZATION_NONE = 0, + CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1, +} cudnnLossNormalizationMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode, + int maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc); + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t + probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the + mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int hostLabels[], /* labels, in CPU memory */ + const int hostLabelLengths[], /* the length of each label, in CPU memory */ + const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + void *workspace, /* pointer to the workspace, in GPU memory */ + size_t workSpaceSizeInBytes); /* size of the workspace */ + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t + probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the + mini batch size, A is the alphabet size) */ + const void *probs, /* 
probabilities after softmax, in GPU memory */ + const int labels[], /* labels, in GPU memory */ + const int labelLengths[], /* the length of each label, in GPU memory */ + const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + size_t workSpaceSizeInBytes, /* size of the workspace */ + void *workspace); /* pointer to the workspace, in GPU memory */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + const int *labels, /* labels, in CPU memory */ + const int *labelLengths, /* the length of each label, in CPU memory */ + const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_TRAIN_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h new file mode 100644 index 0000000000000000000000000000000000000000..b0f41de3b1e87286037ed7d0351057d93287d88f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h @@ -0,0 +1,608 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDNN_BACKEND_H_ +#define _CUDNN_BACKEND_H_ + +/* + * The content in this header file is under development to be included in cudnn.h in the future + * Production code should have all include of this header file remove. 
+ */ + +#include "cudnn_ops_infer.h" +#include "cudnn_cnn_infer.h" + +/* NOTE: definition in extern "C" to be copied later to public header */ +#if defined(__cplusplus) +extern "C" { +#endif + +typedef void *cudnnBackendDescriptor_t; + +typedef struct cudnnFractionStruct { + int64_t numerator; + int64_t denominator; +} cudnnFraction_t; + +typedef enum { + CUDNN_POINTWISE_ADD = 0, + CUDNN_POINTWISE_ADD_SQUARE = 5, + CUDNN_POINTWISE_DIV = 6, + CUDNN_POINTWISE_MAX = 3, + CUDNN_POINTWISE_MIN = 2, + CUDNN_POINTWISE_MOD = 7, + CUDNN_POINTWISE_MUL = 1, + CUDNN_POINTWISE_POW = 8, + CUDNN_POINTWISE_SUB = 9, + + CUDNN_POINTWISE_ABS = 10, + CUDNN_POINTWISE_CEIL = 11, + CUDNN_POINTWISE_COS = 12, + CUDNN_POINTWISE_EXP = 13, + CUDNN_POINTWISE_FLOOR = 14, + CUDNN_POINTWISE_LOG = 15, + CUDNN_POINTWISE_NEG = 16, + CUDNN_POINTWISE_RSQRT = 17, + CUDNN_POINTWISE_SIN = 18, + CUDNN_POINTWISE_SQRT = 4, + CUDNN_POINTWISE_TAN = 19, + CUDNN_POINTWISE_ERF = 20, + CUDNN_POINTWISE_IDENTITY = 21, + CUDNN_POINTWISE_RECIPROCAL = 22, + + CUDNN_POINTWISE_RELU_FWD = 100, + CUDNN_POINTWISE_TANH_FWD = 101, + CUDNN_POINTWISE_SIGMOID_FWD = 102, + CUDNN_POINTWISE_ELU_FWD = 103, + CUDNN_POINTWISE_GELU_FWD = 104, + CUDNN_POINTWISE_SOFTPLUS_FWD = 105, + CUDNN_POINTWISE_SWISH_FWD = 106, + CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107, + + CUDNN_POINTWISE_RELU_BWD = 200, + CUDNN_POINTWISE_TANH_BWD = 201, + CUDNN_POINTWISE_SIGMOID_BWD = 202, + CUDNN_POINTWISE_ELU_BWD = 203, + CUDNN_POINTWISE_GELU_BWD = 204, + CUDNN_POINTWISE_SOFTPLUS_BWD = 205, + CUDNN_POINTWISE_SWISH_BWD = 206, + CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207, + + CUDNN_POINTWISE_CMP_EQ = 300, + CUDNN_POINTWISE_CMP_NEQ = 301, + CUDNN_POINTWISE_CMP_GT = 302, + CUDNN_POINTWISE_CMP_GE = 303, + CUDNN_POINTWISE_CMP_LT = 304, + CUDNN_POINTWISE_CMP_LE = 305, + + CUDNN_POINTWISE_LOGICAL_AND = 400, + CUDNN_POINTWISE_LOGICAL_OR = 401, + CUDNN_POINTWISE_LOGICAL_NOT = 402, + + CUDNN_POINTWISE_GEN_INDEX = 501, + + CUDNN_POINTWISE_BINARY_SELECT = 601, +} cudnnPointwiseMode_t; + +typedef enum { + CUDNN_RESAMPLE_NEAREST = 0, + CUDNN_RESAMPLE_BILINEAR = 1, + CUDNN_RESAMPLE_AVGPOOL = 2, + CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2, + CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4, + CUDNN_RESAMPLE_MAXPOOL = 3, +} cudnnResampleMode_t; + +typedef enum { + CUDNN_SIGNAL_SET = 0, + CUDNN_SIGNAL_WAIT = 1, +} cudnnSignalMode_t; + +typedef enum { + CUDNN_GENSTATS_SUM_SQSUM = 0, +} cudnnGenStatsMode_t; + +typedef enum { + CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0, + CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1, +} cudnnBnFinalizeStatsMode_t; + +typedef enum { + CUDNN_RNG_DISTRIBUTION_BERNOULLI, + CUDNN_RNG_DISTRIBUTION_UNIFORM, + CUDNN_RNG_DISTRIBUTION_NORMAL, +} cudnnRngDistribution_t; + +typedef enum { + CUDNN_ATTR_POINTWISE_MODE = 0, + CUDNN_ATTR_POINTWISE_MATH_PREC = 1, + CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3, + CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5, + CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6, + CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7, + CUDNN_ATTR_POINTWISE_SWISH_BETA = 8, + CUDNN_ATTR_POINTWISE_AXIS = 9, + + CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100, + CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101, + CUDNN_ATTR_CONVOLUTION_DILATIONS = 102, + CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103, + CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104, + CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105, + CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106, + + CUDNN_ATTR_ENGINEHEUR_MODE = 200, + CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201, + 
CUDNN_ATTR_ENGINEHEUR_RESULTS = 202, + + CUDNN_ATTR_ENGINECFG_ENGINE = 300, + CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301, + CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302, + + CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400, + CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401, + CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402, + CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403, + CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404, + CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405, + + CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500, + CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503, + + CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600, + CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601, + + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717, + + CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750, + CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751, + CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752, + CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755, + CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756, + CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757, + CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758, + + CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770, + CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771, + CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772, + CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773, + CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774, + + CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780, + CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784, + CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793, + CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796, + + CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800, + CUDNN_ATTR_OPERATIONGRAPH_OPS = 801, + 
CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802, + + CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900, + CUDNN_ATTR_TENSOR_DATA_TYPE = 901, + CUDNN_ATTR_TENSOR_DIMENSIONS = 902, + CUDNN_ATTR_TENSOR_STRIDES = 903, + CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904, + CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905, + CUDNN_ATTR_TENSOR_UNIQUE_ID = 906, + CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907, + CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908, + CUDNN_ATTR_TENSOR_REORDERING_MODE = 909, + CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913, + + CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000, + CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001, + CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002, + CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003, + + CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100, + CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101, + + CUDNN_ATTR_KNOB_INFO_TYPE = 1200, + CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201, + CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202, + CUDNN_ATTR_KNOB_INFO_STRIDE = 1203, + + CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300, + CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301, + CUDNN_ATTR_ENGINE_KNOB_INFO = 1302, + CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303, + CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304, + CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305, + + CUDNN_ATTR_MATMUL_COMP_TYPE = 1500, + CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503, + + CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520, + CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521, + CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522, + CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523, + CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527, + + CUDNN_ATTR_REDUCTION_OPERATOR = 1600, + CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601, + + CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610, + CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611, + CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612, + + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630, + + CUDNN_ATTR_RESAMPLE_MODE = 1700, + CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701, + CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702, + CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703, + CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704, + CUDNN_ATTR_RESAMPLE_STRIDES = 1705, + CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706, + CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707, + CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708, + + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716, + + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725, + 
CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727, + + CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800, + CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801, + CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802, + CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803, + + CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900, + CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901, + CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902, + CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903, + CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904, + + CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000, + CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001, + CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002, + CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003, + CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004, + CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005, + CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006, + CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007, + CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012, + CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013, + CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014, + + CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100, + CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101, + CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102, + CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103, + CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104, + CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105, + CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106, + CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107, + CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108, + CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109, + CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110, + + CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200, + CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201, + + CUDNN_ATTR_RNG_DISTRIBUTION = 2300, + CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301, + CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302, + CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303, + CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304, + CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305, + + CUDNN_ATTR_OPERATION_RNG_YDESC = 2310, + CUDNN_ATTR_OPERATION_RNG_SEED = 2311, + CUDNN_ATTR_OPERATION_RNG_DESC = 2312, + CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313, + +} cudnnBackendAttributeName_t; + +typedef enum { + CUDNN_TYPE_HANDLE = 0, + CUDNN_TYPE_DATA_TYPE, + CUDNN_TYPE_BOOLEAN, + CUDNN_TYPE_INT64, + CUDNN_TYPE_FLOAT, + CUDNN_TYPE_DOUBLE, + CUDNN_TYPE_VOID_PTR, + CUDNN_TYPE_CONVOLUTION_MODE, + CUDNN_TYPE_HEUR_MODE, + CUDNN_TYPE_KNOB_TYPE, + CUDNN_TYPE_NAN_PROPOGATION, + CUDNN_TYPE_NUMERICAL_NOTE, + CUDNN_TYPE_LAYOUT_TYPE, + CUDNN_TYPE_ATTRIB_NAME, + CUDNN_TYPE_POINTWISE_MODE, + CUDNN_TYPE_BACKEND_DESCRIPTOR, + CUDNN_TYPE_GENSTATS_MODE, + CUDNN_TYPE_BN_FINALIZE_STATS_MODE, + CUDNN_TYPE_REDUCTION_OPERATOR_TYPE, + CUDNN_TYPE_BEHAVIOR_NOTE, + CUDNN_TYPE_TENSOR_REORDERING_MODE, + CUDNN_TYPE_RESAMPLE_MODE, + CUDNN_TYPE_PADDING_MODE, + CUDNN_TYPE_INT32, + CUDNN_TYPE_CHAR, + CUDNN_TYPE_SIGNAL_MODE, + CUDNN_TYPE_FRACTION, + CUDNN_TYPE_NORM_MODE, + CUDNN_TYPE_NORM_FWD_PHASE, + CUDNN_TYPE_RNG_DISTRIBUTION +} cudnnBackendAttributeType_t; + +typedef enum { + CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0, + CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR, + CUDNN_BACKEND_ENGINE_DESCRIPTOR, + CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, + CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, + 
CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, + CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR, + CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, + CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, + CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR, + CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, + CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, + CUDNN_BACKEND_TENSOR_DESCRIPTOR, + CUDNN_BACKEND_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR, + CUDNN_BACKEND_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR, + CUDNN_BACKEND_RESAMPLE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR, + CUDNN_BACKEND_RNG_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR +} cudnnBackendDescriptorType_t; + +typedef enum { + CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0, + CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS, + CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION, + CUDNN_NUMERICAL_NOTE_FFT, + CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC, + CUDNN_NUMERICAL_NOTE_WINOGRAD, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13, + CUDNN_NUMERICAL_NOTE_TYPE_COUNT, +} cudnnBackendNumericalNote_t; + +typedef enum { + CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0, + CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1, + CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2, + CUDNN_BEHAVIOR_NOTE_TYPE_COUNT, +} cudnnBackendBehaviorNote_t; + +typedef enum { + CUDNN_KNOB_TYPE_SPLIT_K = 0, + CUDNN_KNOB_TYPE_SWIZZLE = 1, + CUDNN_KNOB_TYPE_TILE_SIZE = 2, + CUDNN_KNOB_TYPE_USE_TEX = 3, + CUDNN_KNOB_TYPE_EDGE = 4, + CUDNN_KNOB_TYPE_KBLOCK = 5, + CUDNN_KNOB_TYPE_LDGA = 6, + CUDNN_KNOB_TYPE_LDGB = 7, + CUDNN_KNOB_TYPE_CHUNK_K = 8, + CUDNN_KNOB_TYPE_SPLIT_H = 9, + CUDNN_KNOB_TYPE_WINO_TILE = 10, + CUDNN_KNOB_TYPE_MULTIPLY = 11, + CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12, + CUDNN_KNOB_TYPE_TILEK = 13, + CUDNN_KNOB_TYPE_STAGES = 14, + CUDNN_KNOB_TYPE_REDUCTION_MODE = 15, + CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16, + CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17, + CUDNN_KNOB_TYPE_IDX_MODE = 18, + CUDNN_KNOB_TYPE_SLICED = 19, + CUDNN_KNOB_TYPE_SPLIT_RS = 20, + CUDNN_KNOB_TYPE_SINGLEBUFFER = 21, + CUDNN_KNOB_TYPE_LDGC = 22, + CUDNN_KNOB_TYPE_SPECFILT = 23, + CUDNN_KNOB_TYPE_KERNEL_CFG = 24, + CUDNN_KNOB_TYPE_WORKSPACE = 25, + CUDNN_KNOB_TYPE_TILE_CGA = 26, + CUDNN_KNOB_TYPE_TILE_CGA_M = 27, + CUDNN_KNOB_TYPE_TILE_CGA_N = 28, + CUDNN_KNOB_TYPE_BLOCK_SIZE = 29, + CUDNN_KNOB_TYPE_OCCUPANCY = 30, + CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31, + CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32, + CUDNN_KNOB_TYPE_COUNTS, +} cudnnBackendKnobType_t; + +typedef enum { + CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0, + CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3, + CUDNN_LAYOUT_TYPE_COUNT = 4, +} cudnnBackendLayoutType_t; + +typedef enum { + CUDNN_HEUR_MODE_INSTANT = 0, + 
CUDNN_HEUR_MODE_B = 1, + CUDNN_HEUR_MODE_FALLBACK = 2, + CUDNN_HEUR_MODE_A = 3, + CUDNN_HEUR_MODES_COUNT = 4, +} cudnnBackendHeurMode_t; + +typedef enum { + CUDNN_TENSOR_REORDERING_NONE = 0, + CUDNN_TENSOR_REORDERING_INT8x32 = 1, + CUDNN_TENSOR_REORDERING_F16x16 = 2, +} cudnnBackendTensorReordering_t; + +typedef enum { + CUDNN_ZERO_PAD = 0, + CUDNN_NEG_INF_PAD = 1, + CUDNN_EDGE_VAL_PAD = 2, +} cudnnPaddingMode_t; + +typedef enum { + CUDNN_LAYER_NORM = 0, + CUDNN_INSTANCE_NORM = 1, + CUDNN_BATCH_NORM = 2, + CUDNN_GROUP_NORM = 3, +} cudnnBackendNormMode_t; + +typedef enum { + CUDNN_NORM_FWD_INFERENCE = 0, + CUDNN_NORM_FWD_TRAINING = 1, +} cudnnBackendNormFwdPhase_t; + +cudnnStatus_t CUDNNWINAPI +cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t elementCount, + const void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t requestedElementCount, + int64_t *elementCount, + void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack); + +#if defined(__cplusplus) +} +#endif + +#endif /* _CUDNN_BACKEND_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h new file mode 100644 index 0000000000000000000000000000000000000000..5e4c91c93bdc0b5e69d9d6326b4e7384e35a8ca6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h @@ -0,0 +1,571 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/*
+ * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions.
+ */
+
+#if !defined(CUDNN_CNN_INFER_H_)
+#define CUDNN_CNN_INFER_H_
+
+#pragma once
+#include <cuda_runtime.h>
+#include <stdint.h>
+
+#include "cudnn_version.h"
+#include "cudnn_ops_infer.h"
+
+/* These version numbers are autogenerated, do not edit manually. */
+#define CUDNN_CNN_INFER_MAJOR 8
+#define CUDNN_CNN_INFER_MINOR 9
+#define CUDNN_CNN_INFER_PATCH 2
+
+#if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \
+    (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL)
+#error Version mismatch in cuDNN CNN INFER!!!
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t; + +/* + * convolution mode + */ +typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t; + +/* + * CUDNN Reorder + */ +typedef enum { + CUDNN_DEFAULT_REORDER = 0, + CUDNN_NO_REORDER = 1, +} cudnnReorderType_t; + +typedef struct cudnnConvolutionFwdAlgoPerfStruct { + cudnnConvolutionFwdAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionFwdAlgoPerf_t; + +/* Create an instance of convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc); + +/* Destroy an instance of convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc, + int pad_h, /* zero-padding height */ + int pad_w, /* zero-padding width */ + int u, /* vertical filter stride */ + int v, /* horizontal filter stride */ + int dilation_h, /* filter dilation in the vertical dimension */ + int dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int *pad_h, /* zero-padding height */ + int *pad_w, /* zero-padding width */ + int *u, /* vertical filter stride */ + int *v, /* horizontal filter stride */ + int *dilation_h, /* filter dilation in the vertical dimension */ + int *dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc, + int arrayLength, /* nbDims-2 size */ + const int padA[], + const int filterStrideA[], + const int dilationA[], + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); /* convolution data type */ + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int arrayLengthRequested, + int *arrayLength, + int padA[], + int strideA[], + int dilationA[], + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); /* convolution data type */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int *n, + int *c, + int *h, 
+ int *w); + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int nbDims, + int tensorOuputDimA[]); + +/* helper function to provide the convolution forward algo that fit best the requirement */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnFilterDescriptor_t filterDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t destDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnIm2Col(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + void *colBuffer); + +cudnnStatus_t CUDNNWINAPI +cudnnReorderFilterAndBias(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + cudnnReorderType_t reorderType, + const void *filterData, + void *reorderedFilterData, + int reorderBias, + const void *biasData, + void *reorderedBiasData); + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + cudnnConvolutionFwdAlgo_t algo, + size_t *sizeInBytes); + +/* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform the forward pass for batch convolution */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionForward(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBiasActivationForward(cudnnHandle_t handle, + const void *alpha1, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + 
const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *alpha2, + const cudnnTensorDescriptor_t zDesc, + const void *z, + const cudnnTensorDescriptor_t biasDesc, + const void *bias, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* helper function to provide the convolution backward data algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdDataAlgoPerfStruct { + cudnnConvolutionBwdDataAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdDataAlgoPerf_t; + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + cudnnConvolutionBwdDataAlgo_t algo, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardData(cudnnHandle_t handle, + const void *alpha, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdDataAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Helper function to calculate folding descriptors for dgrad */ +cudnnStatus_t CUDNNWINAPI +cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const cudnnTensorFormat_t transformFormat, + cudnnFilterDescriptor_t foldedFilterDesc, + cudnnTensorDescriptor_t paddedDiffDesc, + cudnnConvolutionDescriptor_t foldedConvDesc, + 
cudnnTensorDescriptor_t foldedGradDesc, + cudnnTensorTransformDescriptor_t filterFoldTransDesc, + cudnnTensorTransformDescriptor_t diffPadTransDesc, + cudnnTensorTransformDescriptor_t gradFoldTransDesc, + cudnnTensorTransformDescriptor_t gradUnfoldTransDesc); + +/* cudnnFusedOps... */ +struct cudnnFusedOpsConstParamStruct; +typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t; + +struct cudnnFusedOpsVariantParamStruct; +typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t; + +struct cudnnFusedOpsPlanStruct; +typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t; + +typedef enum { + /* each op in [ ] can be disabled by passing NULL ptr */ + /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0, + /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1, + /* utility for BN training in BN-conv fusion */ + /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */ + /* optionally update running stats and generate saved stats */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2, + /* utility for BN inference in BN-conv fusion */ + /* computes the equivalent scale and bias from learned running stats and learned scale, bias */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3, + /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */ + CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4, + /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */ + CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5, + /* reserved for future use */ + CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6, +} cudnnFusedOps_t; + +typedef enum { + /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get XDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_XDESC = 0, + /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_XDATA_PLACEHOLDER = 1, + /* set/get BN_MODE: pass cudnnBatchNormMode_t* */ + CUDNN_PARAM_BN_MODE = 2, + /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3, + /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4, + /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5, + /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */ + /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */ + CUDNN_PARAM_ACTIVATION_DESC = 6, + /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */ + /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */ + CUDNN_PARAM_CONV_DESC = 7, + /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get WDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_WDESC = 8, + /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_WDATA_PLACEHOLDER = 9, + /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get DWDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_DWDESC = 10, + /* 
set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DWDATA_PLACEHOLDER = 11, + /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YDESC = 12, + /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YDATA_PLACEHOLDER = 13, + /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DYDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DYDESC = 14, + /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DYDATA_PLACEHOLDER = 15, + /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YSTATS_DESC = 16, + /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSUM_PLACEHOLDER = 17, + /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18, + /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19, + /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20, + /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21, + /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22, + /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23, + /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24, + /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25, + + /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ZDESC = 26, + /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ZDATA_PLACEHOLDER = 27, + /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28, + /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29, + /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30, + + /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31, + /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32, + + /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DXDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DXDESC = 33, + /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DXDATA_PLACEHOLDER = 34, + /* set 
DZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DZDESC = 35, + /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DZDATA_PLACEHOLDER = 36, + /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37, + /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38, +} cudnnFusedOpsConstParamLabel_t; + +typedef enum { + CUDNN_PTR_NULL = 0, + CUDNN_PTR_ELEM_ALIGNED = 1, + CUDNN_PTR_16B_ALIGNED = 2, +} cudnnFusedOpsPointerPlaceHolder_t; + +typedef enum { + /* set: pass void* pointing to dev memory */ + /* get: pass void** pointing to host memory */ + CUDNN_PTR_XDATA = 0, + CUDNN_PTR_BN_EQSCALE = 1, + CUDNN_PTR_BN_EQBIAS = 2, + CUDNN_PTR_WDATA = 3, + CUDNN_PTR_DWDATA = 4, + CUDNN_PTR_YDATA = 5, + CUDNN_PTR_DYDATA = 6, + CUDNN_PTR_YSUM = 7, + CUDNN_PTR_YSQSUM = 8, + CUDNN_PTR_WORKSPACE = 9, + CUDNN_PTR_BN_SCALE = 10, + CUDNN_PTR_BN_BIAS = 11, + CUDNN_PTR_BN_SAVED_MEAN = 12, + CUDNN_PTR_BN_SAVED_INVSTD = 13, + CUDNN_PTR_BN_RUNNING_MEAN = 14, + CUDNN_PTR_BN_RUNNING_VAR = 15, + CUDNN_PTR_ZDATA = 16, + CUDNN_PTR_BN_Z_EQSCALE = 17, + CUDNN_PTR_BN_Z_EQBIAS = 18, + CUDNN_PTR_ACTIVATION_BITMASK = 19, + CUDNN_PTR_DXDATA = 20, + CUDNN_PTR_DZDATA = 21, + CUDNN_PTR_BN_DSCALE = 22, + CUDNN_PTR_BN_DBIAS = 23, + + /* set/get: pass size_t* pointing to host memory */ + CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100, + /* set/get: pass int64_t* pointing to host memory */ + CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103, +} cudnnFusedOpsVariantParamLabel_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCnnInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_CNN_INFER_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..79ba34cc1a1557462d49b63a9cb52d9bfe149693 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h @@ -0,0 +1,1183 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_infer : cuDNN's basic definitions and inference operations. + */ + +#if !defined(CUDNN_OPS_INFER_H_) +#define CUDNN_OPS_INFER_H_ + +#include <cuda_runtime.h> +#include <stdint.h> + +#include "cudnn_version.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_INFER_MAJOR 8 +#define CUDNN_OPS_INFER_MINOR 9 +#define CUDNN_OPS_INFER_PATCH 2 + +#if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS INFER!!!
+#endif + +#ifndef CUDNNWINAPI +#ifdef _WIN32 +#define CUDNNWINAPI __stdcall +#else +#define CUDNNWINAPI +#endif +#endif + +/* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */ +#if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__)) +/* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */ +#define CUDNN_DEPRECATED __attribute__((deprecated)) +#elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER) +/* Microsoft Visual C++ */ +#define CUDNN_DEPRECATED __declspec(deprecated) +#elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L) +/* C++14 compilers */ +#define CUDNN_DEPRECATED [[deprecated]] +#else +/* No support for the deprecated attribute */ +#define CUDNN_DEPRECATED +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudnnContext; +typedef struct cudnnContext *cudnnHandle_t; + +size_t CUDNNWINAPI +cudnnGetVersion(void); + +size_t CUDNNWINAPI +cudnnGetMaxDeviceVersion(void); + +/* Returns CUDA Runtime version statically linked against cudnn */ +size_t CUDNNWINAPI +cudnnGetCudartVersion(void); + +/* + * CUDNN return codes + */ +typedef enum { + CUDNN_STATUS_SUCCESS = 0, + CUDNN_STATUS_NOT_INITIALIZED = 1, + CUDNN_STATUS_ALLOC_FAILED = 2, + CUDNN_STATUS_BAD_PARAM = 3, + CUDNN_STATUS_INTERNAL_ERROR = 4, + CUDNN_STATUS_INVALID_VALUE = 5, + CUDNN_STATUS_ARCH_MISMATCH = 6, + CUDNN_STATUS_MAPPING_ERROR = 7, + CUDNN_STATUS_EXECUTION_FAILED = 8, + CUDNN_STATUS_NOT_SUPPORTED = 9, + CUDNN_STATUS_LICENSE_ERROR = 10, + CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11, + CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12, + CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13, + CUDNN_STATUS_VERSION_MISMATCH = 14, +} cudnnStatus_t; + +/* human-readable error messages */ +const char *CUDNNWINAPI +cudnnGetErrorString(cudnnStatus_t status); + +/* Forward definition in this version only */ +typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t; + +typedef enum { + CUDNN_ERRQUERY_RAWCODE = 0, + CUDNN_ERRQUERY_NONBLOCKING = 1, + CUDNN_ERRQUERY_BLOCKING = 2, +} cudnnErrQueryMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag); + +#ifndef __LIBRARY_TYPES_H__ + +typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType; + +#endif + +cudnnStatus_t CUDNNWINAPI +cudnnGetProperty(libraryPropertyType type, int *value); + +cudnnStatus_t CUDNNWINAPI +cudnnCreate(cudnnHandle_t *handle); +cudnnStatus_t CUDNNWINAPI +cudnnDestroy(cudnnHandle_t handle); +cudnnStatus_t CUDNNWINAPI +cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId); +cudnnStatus_t CUDNNWINAPI +cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId); + +/* Data structures to represent Image/Filter and the Neural Network Layer */ +typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t; +typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t; +typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t; +typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t; +typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t; +typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t; +typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t; +typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t; +typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t; +typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t; +/* + * CUDNN data type + */ +typedef enum { + 
CUDNN_DATA_FLOAT = 0, + CUDNN_DATA_DOUBLE = 1, + CUDNN_DATA_HALF = 2, + CUDNN_DATA_INT8 = 3, + CUDNN_DATA_INT32 = 4, + CUDNN_DATA_INT8x4 = 5, + CUDNN_DATA_UINT8 = 6, + CUDNN_DATA_UINT8x4 = 7, + CUDNN_DATA_INT8x32 = 8, + CUDNN_DATA_BFLOAT16 = 9, + CUDNN_DATA_INT64 = 10, + CUDNN_DATA_BOOLEAN = 11, + CUDNN_DATA_FP8_E4M3 = 12, + CUDNN_DATA_FP8_E5M2 = 13, + CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14, +} cudnnDataType_t; + +/* + * CUDNN math type + */ +typedef enum { + CUDNN_DEFAULT_MATH = 0, + CUDNN_TENSOR_OP_MATH = 1, + CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2, + CUDNN_FMA_MATH = 3, +} cudnnMathType_t; + +/* + * CUDNN propagate Nan + */ +typedef enum { + CUDNN_NOT_PROPAGATE_NAN = 0, + CUDNN_PROPAGATE_NAN = 1, +} cudnnNanPropagation_t; + +/* + * CUDNN Determinism + */ +typedef enum { + CUDNN_NON_DETERMINISTIC = 0, + CUDNN_DETERMINISTIC = 1, +} cudnnDeterminism_t; + +/* Maximum supported number of tensor dimensions */ +#define CUDNN_DIM_MAX 8 + +/* Create an instance of a generic Tensor descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc); + +typedef enum { + CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */ + CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/ + CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */ +} cudnnTensorFormat_t; + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w); /* width of input section */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w, /* width of input section */ + int nStride, + int cStride, + int hStride, + int wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t *dataType, /* image data type */ + int *n, /* number of inputs (batch size) */ + int *c, /* number of input feature maps */ + int *h, /* height of input section */ + int *w, /* width of input section */ + int *nStride, + int *cStride, + int *hStride, + int *wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, + int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, + int *nbDims, + int dimA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size); + +/* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride + + 1)Example of all images in row major order one batch of features after the other (with an optional padding on row) + input_stride : c x h x h_stride + feature_stride : h x h_stride + h_stride : >= w ( h_stride = w if no padding) + w_stride : 1 + + + 2)Example of all images in row major 
with features maps interleaved + input_stride : c x h x h_stride + feature_stride : 1 + h_stride : w x c + w_stride : c + + 3)Example of all images in column major order one batch of features after the other (with optional padding on column) + input_stride : c x w x w_stride + feature_stride : w x w_stride + h_stride : 1 + w_stride : >= h + +*/ + +/* Destroy an instance of Tensor4d descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc); + +/* Fold/unfold transforms */ +typedef enum { + CUDNN_TRANSFORM_FOLD = 0U, + CUDNN_TRANSFORM_UNFOLD = 1U, +} cudnnFoldingDirection_t; + +/** Create a destination descriptor for cudnnTransformTensor */ +cudnnStatus_t CUDNNWINAPI +cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc, + const cudnnTensorDescriptor_t srcDesc, + cudnnTensorDescriptor_t destDesc, + size_t *destSizeInBytes); + +/** Create an empty tensor transform descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc); + +/** Initialize a previously created tensor transform descriptor. */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + const uint32_t nbDims, + const cudnnTensorFormat_t destFormat, + const int32_t padBeforeA[], + const int32_t padAfterA[], + const uint32_t foldA[], + const cudnnFoldingDirection_t direction); + +/** + * Retrieves the values stored in a previously initialized tensor transform + * descriptor. + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + uint32_t nbDimsRequested, + cudnnTensorFormat_t *destFormat, + int32_t padBeforeA[], + int32_t padAfterA[], + uint32_t foldA[], + cudnnFoldingDirection_t *direction); + +/** + * Destroys a previously created tensor transform descriptor. 
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc); + +/* Tensor layout conversion helper (y = alpha * x + beta * y) */ +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensorEx(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnTensorDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnTensorDescriptor_t destDesc, + void *destData); + +/* Tensor Bias addition : C = alpha * A + beta * C */ +cudnnStatus_t CUDNNWINAPI +cudnnAddTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN OpTensor op type + */ +typedef enum { + CUDNN_OP_TENSOR_ADD = 0, + CUDNN_OP_TENSOR_MUL = 1, + CUDNN_OP_TENSOR_MIN = 2, + CUDNN_OP_TENSOR_MAX = 3, + CUDNN_OP_TENSOR_SQRT = 4, + CUDNN_OP_TENSOR_NOT = 5, +} cudnnOpTensorOp_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t opTensorOp, + cudnnDataType_t opTensorCompType, + cudnnNanPropagation_t opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t *opTensorOp, + cudnnDataType_t *opTensorCompType, + cudnnNanPropagation_t *opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc); + +/* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */ +/* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */ +cudnnStatus_t CUDNNWINAPI +cudnnOpTensor(cudnnHandle_t handle, + const cudnnOpTensorDescriptor_t opTensorDesc, + const void *alpha1, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *alpha2, + const cudnnTensorDescriptor_t bDesc, + const void *B, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN ReduceTensor op type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_ADD = 0, + CUDNN_REDUCE_TENSOR_MUL = 1, + CUDNN_REDUCE_TENSOR_MIN = 2, + CUDNN_REDUCE_TENSOR_MAX = 3, + CUDNN_REDUCE_TENSOR_AMAX = 4, + CUDNN_REDUCE_TENSOR_AVG = 5, + CUDNN_REDUCE_TENSOR_NORM1 = 6, + CUDNN_REDUCE_TENSOR_NORM2 = 7, + CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8, +} cudnnReduceTensorOp_t; + +/* + * CUDNN ReduceTensor indices type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_NO_INDICES = 0, + CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1, +} cudnnReduceTensorIndices_t; + +/* + * CUDNN tensor indices type size (all unsigned) + * Currently not supported, default is 32 bit unsigned. 
+ */ +typedef enum { + CUDNN_32BIT_INDICES = 0, + CUDNN_64BIT_INDICES = 1, + CUDNN_16BIT_INDICES = 2, + CUDNN_8BIT_INDICES = 3, +} cudnnIndicesType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t reduceTensorOp, + cudnnDataType_t reduceTensorCompType, + cudnnNanPropagation_t reduceTensorNanOpt, + cudnnReduceTensorIndices_t reduceTensorIndices, + cudnnIndicesType_t reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t *reduceTensorOp, + cudnnDataType_t *reduceTensorCompType, + cudnnNanPropagation_t *reduceTensorNanOpt, + cudnnReduceTensorIndices_t *reduceTensorIndices, + cudnnIndicesType_t *reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc); + +/* Helper function to return the minimum size of the index space to be passed to the reduction given the input and + * output tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionIndicesSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output + * tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionWorkspaceSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Tensor operation : C = reduce op( alpha * A ) + beta * C */ +/* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */ +/* The indices space is ignored for reduce ops other than min or max. 
*/ +cudnnStatus_t CUDNNWINAPI +cudnnReduceTensor(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + void *indices, + size_t indicesSizeInBytes, + void *workspace, + size_t workspaceSizeInBytes, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* Set all values of a tensor to a given value : y[i] = value[0] */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr); + +/* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */ +cudnnStatus_t CUDNNWINAPI +cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha); + +/* Create an instance of FilterStruct */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int k, /* number of output feature maps */ + int c, /* number of input feature maps */ + int h, /* height of each input filter */ + int w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *k, /* number of output feature maps */ + int *c, /* number of input feature maps */ + int *h, /* height of each input filter */ + int *w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int nbDims, + const int filterDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *nbDims, + int filterDimA[]); +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformFilter(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnFilterDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnFilterDescriptor_t destDesc, + void *destData); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc); + +/* + * softmax algorithm + */ +typedef enum { + CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */ + CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */ + CUDNN_SOFTMAX_LOG = 2 +} cudnnSoftmaxAlgorithm_t; + +typedef enum { + CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */ + CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */ +} cudnnSoftmaxMode_t; + +/* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxForward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * pooling mode + */ +typedef enum { + CUDNN_POOLING_MAX = 0, + 
CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */ + CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */ + CUDNN_POOLING_MAX_DETERMINISTIC = 3 +} cudnnPoolingMode_t; + +/* Create an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t mode, + cudnnNanPropagation_t maxpoolingNanOpt, + int windowHeight, + int windowWidth, + int verticalPadding, + int horizontalPadding, + int verticalStride, + int horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *windowHeight, + int *windowWidth, + int *verticalPadding, + int *horizontalPadding, + int *verticalStride, + int *horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc, + const cudnnPoolingMode_t mode, + const cudnnNanPropagation_t maxpoolingNanOpt, + int nbDims, + const int windowDimA[], + const int paddingA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + int nbDimsRequested, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *nbDims, + int windowDimA[], + int paddingA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int nbDims, + int outputTensorDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int *n, + int *c, + int *h, + int *w); + +/* Destroy an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc); + +/* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingForward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * activation mode + */ +typedef enum { + CUDNN_ACTIVATION_SIGMOID = 0, + CUDNN_ACTIVATION_RELU = 1, + CUDNN_ACTIVATION_TANH = 2, + CUDNN_ACTIVATION_CLIPPED_RELU = 3, + CUDNN_ACTIVATION_ELU = 4, + CUDNN_ACTIVATION_IDENTITY = 5, + CUDNN_ACTIVATION_SWISH = 6 +} cudnnActivationMode_t; + +/* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t mode, + cudnnNanPropagation_t reluNanOpt, + double coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t *mode, + cudnnNanPropagation_t *reluNanOpt, + double *coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t 
activationDesc, double swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc); + +/* Function to perform forward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationForward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * Create an instance of LRN (Local Response Normalization) descriptor + * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper + */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc); + +#define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */ +#define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */ +#define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */ +#define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */ + +/* LRN layer mode */ +typedef enum { + CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */ +} cudnnLRNMode_t; + +/* + * Uses a window [center-lookBehind, center+lookAhead], where + * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1. + * Values of double parameters cast to tensor data type. + */ +cudnnStatus_t CUDNNWINAPI +cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK); +/* + * Retrieve the settings currently stored in an LRN layer descriptor + * Any of the provided pointers can be NULL (no corresponding value will be returned) + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK); + +/* Destroy an instance of LRN descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc); + +/* LRN functions: output = alpha * normalize(x) + beta * old_y */ + +/* LRN cross-channel forward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0, +} cudnnDivNormMode_t; + +/* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */ +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_BATCHNORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_BATCHNORM_SPATIAL = 1, + + /* + * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors). 
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values + */ + CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2, +} cudnnBatchNormMode_t; + +#define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */ + +/* + * Derives a tensor descriptor from layer data descriptor for BatchNormalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnBatchNormMode_t mode); + +typedef enum { + CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */ + CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */ + CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */ +} cudnnBatchNormOps_t; + +/* + * Performs Batch Normalization during Inference: + * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k] + * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed + * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining + * above for notes on function arguments. + */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardInference(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + const void *estimatedMean, + const void *estimatedVariance, + double epsilon); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_NORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_NORM_PER_CHANNEL = 1, +} cudnnNormMode_t; + +typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t; + +/* + * Derives a tensor descriptor from layer data descriptor for Normalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc, + cudnnTensorDescriptor_t derivedNormMeanVarDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnNormMode_t mode, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +typedef enum { + CUDNN_NORM_OPS_NORM = 0, /* do normalization only */ + CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */ + CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */ +} cudnnNormOps_t; + +/* + * Performs Normalization during Inference: + * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k] + * with normScale, normBias, runningMean, runningInvVariance tensors indexed + * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining + * above for notes on function arguments. 
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardInference(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + const cudnnTensorDescriptor_t normMeanVarDesc, + const void *estimatedMean, + const void *estimatedVariance, + const cudnnTensorDescriptor_t zDesc, + const void *z, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + double epsilon, + int groupCnt); /* Place hold for future work*/ + +/* APIs for spatial transformer network*/ +typedef enum { + CUDNN_SAMPLER_BILINEAR = 0, +} cudnnSamplerType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc, + cudnnSamplerType_t samplerType, + cudnnDataType_t dataType, + const int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *theta, + void *grid); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerForward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *grid, + const void *beta, + cudnnTensorDescriptor_t yDesc, + void *y); + +typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc); + +/*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes); + +/*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +/* Restores the dropout descriptor to a previously saved-off state */ +cudnnStatus_t CUDNNWINAPI +cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +cudnnStatus_t CUDNNWINAPI +cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float *dropout, + void **states, + unsigned long long *seed); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutForward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t xdesc, + const void *x, + const cudnnTensorDescriptor_t ydesc, + void *y, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* TODO: remove */ + +typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t; +typedef 
struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t; + +/* TODO: move these enums out to the appropriate submodule */ +typedef enum { + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0, + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1, + CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2, + CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3, + CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4, + CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7, + CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8 +} cudnnConvolutionFwdAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7 +} cudnnConvolutionBwdFilterAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6 +} cudnnConvolutionBwdDataAlgo_t; + +typedef enum { + CUDNN_RNN_ALGO_STANDARD = 0, + CUDNN_RNN_ALGO_PERSIST_STATIC = 1, + CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2, + CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3, + CUDNN_RNN_ALGO_COUNT = 4, +} cudnnRNNAlgo_t; + +typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t; + +/* TODO: remove */ +typedef struct cudnnAlgorithmUnionStruct { + union Algorithm { + cudnnConvolutionFwdAlgo_t convFwdAlgo; + cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo; + cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo; + cudnnRNNAlgo_t RNNAlgo; + cudnnCTCLossAlgo_t CTCLossAlgo; + } algo; +} cudnnAlgorithm_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t algoDesc, + cudnnStatus_t status, + float time, + size_t memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t *algoDesc, + cudnnStatus_t *status, + float *time, + size_t *memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy); + +CUDNN_DEPRECATED 
cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSaveAlgorithm(cudnnHandle_t handle, + cudnnAlgorithmDescriptor_t algoDesc, + void *algoSpace, + size_t algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRestoreAlgorithm(cudnnHandle_t handle, + void *algoSpace, + size_t algoSpaceSizeInBytes, + cudnnAlgorithmDescriptor_t algoDesc); + +typedef enum { + CUDNN_SEV_FATAL = 0, + CUDNN_SEV_ERROR = 1, + CUDNN_SEV_WARNING = 2, + CUDNN_SEV_INFO = 3, +} cudnnSeverity_t; + +/* Message masks to be used with cudnnSetCallback() */ +#define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR) +#define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING) +#define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO) + +/* struct containing useful informaiton for each API call */ +typedef struct cudnnDebugStruct { + unsigned cudnn_version; + cudnnStatus_t cudnnStatus; + unsigned time_sec; /* epoch time in seconds */ + unsigned time_usec; /* microseconds part of epoch time */ + unsigned time_delta; /* time since start in seconds */ + cudnnHandle_t handle; /* cudnn handle */ + cudaStream_t stream; /* cuda stream ID */ + unsigned long long pid; /* process ID */ + unsigned long long tid; /* thread ID */ + int cudaDeviceId; /* CUDA device ID */ + int reserved[15]; /* reserved for future use */ +} cudnnDebug_t; + +typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_INFER_H_ */ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..425c7c684968d76e1154de76eac082e61ec62f36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h @@ -0,0 +1,501 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_train : cuDNN's basic training operations and algorithms. + */ + +#if !defined(CUDNN_OPS_TRAIN_H_) +#define CUDNN_OPS_TRAIN_H_ + +#include <cuda_runtime.h> +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_TRAIN_MAJOR 8 +#define CUDNN_OPS_TRAIN_MINOR 9 +#define CUDNN_OPS_TRAIN_PATCH 2 + +#if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS TRAIN!!!
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Function to perform backward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxBackward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingBackward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationBackward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* LRN cross-channel backward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + const void *dy, + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */ + void *dx, /* output x differential */ + void *dMeans); /* output means differential, can be NULL */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes); 
+ +/* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTraining( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + + /* Shared desc for the next 6 tensors in the argument list. + Data type to be set as follows: + type = (typeOf(x) == double) ? double : float + Dimensions for this descriptor depend on normalization mode + - Spatial Normalization : tensors are expected to have dims 1xCx1x1 + (normalization is performed across NxHxW) + - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW + (normalization is performed across N) */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + + /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */ + const void *bnScale, + const void *bnBias, + + /* MUST use factor=1 in the very first call of a complete training cycle. + Use a factor=1/(1+n) at N-th call to the function to get + Cumulative Moving Average (CMA) behavior + CMA[n] = (x[1]+...+x[n])/n + Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) = + ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) = + CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */ + double exponentialAverageFactor, + + /* Used in Training phase only. + runningMean = newMean*factor + runningMean*(1-factor) */ + void *resultRunningMean, + /* Output in training mode, input in inference. Is the moving average + of variance[x] (factor is applied in the same way as for runningMean) */ + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance); + +/* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTrainingEx( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + + double exponentialAverageFactor, + void *resultRunningMean, + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + + cudnnActivationDescriptor_t activationDesc, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* Performs backward pass of Batch Normalization layer. 
Returns x gradient, +* bnScale gradient and bnBias gradient */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackward(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */ + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScale, /* bnBias doesn't affect backpropagation */ + /* scale and bias diff are not backpropagated below this layer */ + void *dBnScaleResult, + void *dBnBiasResult, + /* Same epsilon as forward pass */ + double epsilon, + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance); + +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScaleData, + const void *bnBiasData, /* needed if there is activation */ + void *dBnScaleData, + void *dBnBiasData, + double epsilon, /* Same epsilon as forward pass */ + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for 
future work, should be set to 1 now*/ + +/* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardTraining(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + double exponentialAverageFactor, + const cudnnTensorDescriptor_t normMeanVarDesc, + void *resultRunningMean, + void *resultRunningVariance, + /* Has to be >= 0. Should be the same in forward and backward functions. */ + double epsilon, + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationBackward(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const void *normScaleData, + const void *normBiasData, /* needed if there is activation */ + void *dNormScaleData, + void *dNormBiasData, + double epsilon, /* Same epsilon as forward pass */ + const cudnnTensorDescriptor_t normMeanVarDesc, + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *dgrid, + void *dtheta); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerBackward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const void *alphaDgrid, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *grid, + const void *betaDgrid, + void *dgrid); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutBackward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t dydesc, + const void *dy, + const cudnnTensorDescriptor_t dxdesc, + void *dx, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* + * \brief Cross-library version checker. 
+ * This function is implemented differently in each sub-library. Each sub-library
+ * checks whether its own version matches that of its dependencies.
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
+ */
+cudnnStatus_t CUDNNWINAPI
+cudnnOpsTrainVersionCheck(void);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* CUDNN_OPS_TRAIN_H_ */
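
Taken together, the declarations above give a complete training-time round trip for batch normalization: verify sub-library versions once with cudnnOpsTrainVersionCheck, run cudnnBatchNormalizationForwardTraining with the cumulative-moving-average factor described in its comment (factor = 1 on the first call, 1/(1+n) at step n), keep resultSaveMean/resultSaveInvVariance, and pass them back to cudnnBatchNormalizationBackward with the same epsilon. The sketch below illustrates that pairing and is not vendor code; the dev_* device buffers and the loss/gradient computation are hypothetical placeholders, and error handling is reduced to the version check.

#include <cudnn.h>
#include <stdio.h>

/* One training step for a spatial BN layer. The dev_* arguments are
   hypothetical, caller-allocated device buffers; xDesc is shared by
   x, y, dy and dx, and bnDesc is the derived 1xCx1x1 descriptor. */
cudnnStatus_t bn_train_step(cudnnHandle_t handle,
                            cudnnTensorDescriptor_t xDesc,
                            cudnnTensorDescriptor_t bnDesc,
                            float *dev_x, float *dev_y,
                            float *dev_dy, float *dev_dx,
                            float *dev_scale, float *dev_bias,
                            float *dev_dscale, float *dev_dbias,
                            float *dev_runMean, float *dev_runVar,
                            float *dev_saveMean, float *dev_saveInvVar,
                            int iteration /* 0-based step index */)
{
    const float one = 1.0f, zero = 0.0f;
    const double epsilon = 1e-5; /* >= CUDNN_BN_MIN_EPSILON, identical in fwd and bwd */
    /* factor = 1 on the very first call, 1/(1+n) at step n: the cumulative moving
       average described in the cudnnBatchNormalizationForwardTraining comment. */
    const double factor = 1.0 / (1.0 + (double)iteration);

    cudnnStatus_t status = cudnnBatchNormalizationForwardTraining(
        handle, CUDNN_BATCHNORM_SPATIAL,
        &one, &zero,
        xDesc, dev_x, xDesc, dev_y,
        bnDesc, dev_scale, dev_bias,
        factor, dev_runMean, dev_runVar,
        epsilon,
        dev_saveMean, dev_saveInvVar); /* cached for the backward pass */
    if (status != CUDNN_STATUS_SUCCESS) return status;

    /* ... loss computation producing the upstream gradient dev_dy goes here ... */

    return cudnnBatchNormalizationBackward(
        handle, CUDNN_BATCHNORM_SPATIAL,
        &one, &zero,  /* data-gradient blend factors */
        &one, &zero,  /* parameter-gradient blend factors */
        xDesc, dev_x,
        xDesc, dev_dy,
        xDesc, dev_dx,
        bnDesc, dev_scale,
        dev_dscale, dev_dbias,
        epsilon,                       /* same epsilon as the forward call */
        dev_saveMean, dev_saveInvVar); /* reuse of the cached statistics */
}

int main(void)
{
    /* Fail fast if the ops_train sub-library does not match its dependencies. */
    if (cudnnOpsTrainVersionCheck() != CUDNN_STATUS_SUCCESS) {
        fprintf(stderr, "cuDNN sub-library version mismatch\n");
        return 1;
    }
    printf("cuDNN version: %zu\n", cudnnGetVersion());
    return 0;
}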
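
Similarly, cudnnDropoutBackward reuses the reserve space written by the matching forward dropout call, so both calls must share the same dropout descriptor and the same reserve buffer sized via cudnnDropoutGetReserveSpaceSize. The sketch below illustrates that pairing under the assumption of float activations and caller-allocated device buffers (the dev_* names are hypothetical); the descriptor-setup and forward calls (cudnnCreateDropoutDescriptor, cudnnDropoutGetStatesSize, cudnnSetDropoutDescriptor, cudnnDropoutForward) belong to the cuDNN ops inference side of the API rather than this header, and status checks are omitted for brevity.

#include <cudnn.h>
#include <cuda_runtime.h>
#include <stddef.h>

/* dev_x/dev_y hold the forward activations, dev_dy/dev_dx the gradients;
   all are device buffers matching xDesc, allocated by the caller. */
void dropout_round_trip(cudnnHandle_t handle, cudnnTensorDescriptor_t xDesc,
                        float *dev_x, float *dev_y,
                        float *dev_dy, float *dev_dx)
{
    cudnnDropoutDescriptor_t dropDesc;
    cudnnCreateDropoutDescriptor(&dropDesc);

    /* RNG state buffer: size is queried from the handle, then owned by the descriptor. */
    size_t stateBytes = 0;
    cudnnDropoutGetStatesSize(handle, &stateBytes);
    void *states = NULL;
    cudaMalloc(&states, stateBytes);
    cudnnSetDropoutDescriptor(dropDesc, handle, 0.5f /* drop rate */,
                              states, stateBytes, 1234ULL /* seed */);

    /* The reserve space records which elements were dropped; it is written by
       the forward call and must be passed unchanged to cudnnDropoutBackward. */
    size_t reserveBytes = 0;
    cudnnDropoutGetReserveSpaceSize(xDesc, &reserveBytes);
    void *reserve = NULL;
    cudaMalloc(&reserve, reserveBytes);

    cudnnDropoutForward(handle, dropDesc, xDesc, dev_x, xDesc, dev_y,
                        reserve, reserveBytes);
    /* ... rest of the forward pass and gradient computation producing dev_dy ... */
    cudnnDropoutBackward(handle, dropDesc, xDesc, dev_dy, xDesc, dev_dx,
                         reserve, reserveBytes);

    cudaFree(reserve);
    cudaFree(states);
    cudnnDestroyDropoutDescriptor(dropDesc);
}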