diff --git a/.gitattributes b/.gitattributes
index 21cc22adcd83a9a32877393b68309058e9fd0a3f..f3e02cc64eb6c0ea80c2f6bc7a562fc6e707f310 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -133,3 +133,10 @@ env-llmeval/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu
 env-llmeval/lib/python3.10/site-packages/triton/_C/libtriton.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/zstandard/backend_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/zstandard/_cffi.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 filter=lfs diff=lfs merge=lfs -text
diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6f039db8effce996015f901562009ebe976d832
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h
@@ -0,0 +1,588 @@
+/*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CHANNEL_DESCRIPTOR_H__)
+#define __CHANNEL_DESCRIPTOR_H__
+
+#if defined(__cplusplus)
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#include "cuda_runtime_api.h"
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+/**
+ * \addtogroup CUDART_HIGHLEVEL
+ *
+ * @{
+ */
+
+/**
+ * \brief \hl Returns a channel descriptor using the specified format
+ *
+ * Returns a channel descriptor with format \p f and number of bits of each
+ * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is
+ * defined as:
+ * \code
+ struct cudaChannelFormatDesc {
+ int x, y, z, w;
+ enum cudaChannelFormatKind f;
+ };
+ * \endcode
+ *
+ * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,
+ * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat,
+ * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2,
+ * ::cudaChannelFormatKindSignedNormalized8X4,
+ * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2,
+ * ::cudaChannelFormatKindUnsignedNormalized8X4,
+ * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2,
+ * ::cudaChannelFormatKindSignedNormalized16X4,
+ * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2,
+ * ::cudaChannelFormatKindUnsignedNormalized16X4
+ * or ::cudaChannelFormatKindNV12.
+ *
+ * The format is specified by the template specialization.
+ *
+ * The template function specializes for the following scalar types:
+ * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float.
+ * The template function specializes for the following vector types: + * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}. + * The template function specializes for following cudaChannelFormatKind enum values: + * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12. + * + * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone + * + * \return + * Channel descriptor with format \p f + * + * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)", + * ::cudaGetChannelDesc, + */ +template __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(char) * 8; + +#if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__) + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +#else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */ + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +#endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */ +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, e, e, 
e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +#if !defined(__LP64__) + +template<> __inline__ 
__host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +#endif /* !__LP64__ */ + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void) +{ + int e = (int)sizeof(char) * 8; + + return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12); +} + +template __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone); +} + +/* Signed 8-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4); +} + +/* Unsigned 8-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, 
cudaChannelFormatKindUnsignedNormalized8X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4); +} + +/* Signed 16-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4); +} + +/* Unsigned 16-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4); +} + +/* NV12 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12); +} + +/* BC1 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1); +} + +/* BC1sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB); +} + +/* BC2 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2); +} + +/* BC2sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB); +} + +/* BC3 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3); +} + +/* BC3sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB); +} + +/* BC4 unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4); +} + +/* BC4 signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4); +} + +/* BC5 unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, 
cudaChannelFormatKindUnsignedBlockCompressed5); +} + +/* BC5 signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5); +} + +/* BC6H unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H); +} + +/* BC6H signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H); +} + +/* BC7 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7); +} + +/* BC7sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB); +} + +#endif /* __cplusplus */ + +/** @} */ +/** @} */ /* END CUDART_TEXTURE_HL */ + +#endif /* !__CHANNEL_DESCRIPTOR_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h new file mode 100644 index 0000000000000000000000000000000000000000..942f22dab54701141c9ce5e2a055c957824ca5b8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h @@ -0,0 +1,1690 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _COOPERATIVE_GROUPS_H_ +#define _COOPERATIVE_GROUPS_H_ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#include "cooperative_groups/details/info.h" +#include "cooperative_groups/details/driver_abi.h" +#include "cooperative_groups/details/helpers.h" +#include "cooperative_groups/details/memory.h" + +#if defined(_CG_HAS_STL_ATOMICS) +#include +#define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope; +#else +#define _CG_THREAD_SCOPE(scope) +#endif + +_CG_BEGIN_NAMESPACE + +namespace details { + _CG_CONST_DECL unsigned int coalesced_group_id = 1; + _CG_CONST_DECL unsigned int multi_grid_group_id = 2; + _CG_CONST_DECL unsigned int grid_group_id = 3; + _CG_CONST_DECL unsigned int thread_block_id = 4; + _CG_CONST_DECL unsigned int multi_tile_group_id = 5; + _CG_CONST_DECL unsigned int cluster_group_id = 6; +} + +/** + * class thread_group; + * + * Generic thread group type, into which all groups are convertible. + * It acts as a container for all storage necessary for the derived groups, + * and will dispatch the API calls to the correct derived group. This means + * that all derived groups must implement the same interface as thread_group. 
+ */ +class thread_group +{ +protected: + struct group_data { + unsigned int _unused : 1; + unsigned int type : 7, : 0; + }; + + struct gg_data { + details::grid_workspace *gridWs; + }; + +#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + struct mg_data { + unsigned long long _unused : 1; + unsigned long long type : 7; + unsigned long long handle : 56; + const details::multi_grid::multi_grid_functions *functions; + }; +#endif + + struct tg_data { + unsigned int is_tiled : 1; + unsigned int type : 7; + unsigned int size : 24; + // packed to 4b + unsigned int metaGroupSize : 16; + unsigned int metaGroupRank : 16; + // packed to 8b + unsigned int mask; + // packed to 12b + unsigned int _res; + }; + + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend class thread_block; + + union __align__(8) { + group_data group; + tg_data coalesced; + gg_data grid; +#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + mg_data multi_grid; +#endif + } _data; + + _CG_QUALIFIER thread_group operator=(const thread_group& src); + + _CG_QUALIFIER thread_group(unsigned int type) { + _data.group.type = type; + _data.group._unused = false; + } + +#ifdef _CG_CPP11_FEATURES + static_assert(sizeof(tg_data) <= 16, "Failed size check"); + static_assert(sizeof(gg_data) <= 16, "Failed size check"); +# ifdef _CG_ABI_EXPERIMENTAL + static_assert(sizeof(mg_data) <= 16, "Failed size check"); +# endif +#endif + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device) + + _CG_QUALIFIER unsigned long long size() const; + _CG_QUALIFIER unsigned long long num_threads() const; + _CG_QUALIFIER unsigned long long thread_rank() const; + _CG_QUALIFIER void sync() const; + _CG_QUALIFIER unsigned int get_type() const { + return _data.group.type; + } + +}; + +template +struct thread_group_base : public thread_group { + _CG_QUALIFIER thread_group_base() : thread_group(TyId) {} + _CG_STATIC_CONST_DECL unsigned int id = TyId; +}; + +#if defined(_CG_HAS_MULTI_GRID_GROUP) + +/** + * class multi_grid_group; + * + * Threads within this this group are guaranteed to be co-resident on the + * same system, on multiple devices within the same launched kernels. + * To use this group, the kernel must have been launched with + * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent), + * and the device must support it (queryable device attribute). 
+ * + * Constructed via this_multi_grid(); + */ + + +# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) +class multi_grid_group; + +// Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls +template +__device__ _CG_DEPRECATED multi_grid_group this_multi_grid(); + +class multi_grid_group : public thread_group_base +{ +private: + template + _CG_QUALIFIER multi_grid_group() { + _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics(); + _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle(); + } + + friend multi_grid_group this_multi_grid(); + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system) + + _CG_QUALIFIER bool is_valid() const { + return (_data.multi_grid.handle != 0); + } + + _CG_QUALIFIER void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + _data.multi_grid.functions->sync(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned long long num_threads() const { + _CG_ASSERT(is_valid()); + return _data.multi_grid.functions->size(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned long long size() const { + return num_threads(); + } + + _CG_QUALIFIER unsigned long long thread_rank() const { + _CG_ASSERT(is_valid()); + return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned int grid_rank() const { + _CG_ASSERT(is_valid()); + return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle)); + } + + _CG_QUALIFIER unsigned int num_grids() const { + _CG_ASSERT(is_valid()); + return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle)); + } +}; +# else +class multi_grid_group +{ +private: + unsigned long long _handle; + unsigned int _size; + unsigned int _rank; + + friend _CG_QUALIFIER multi_grid_group this_multi_grid(); + + _CG_QUALIFIER multi_grid_group() { + _handle = details::multi_grid::get_intrinsic_handle(); + _size = details::multi_grid::size(_handle); + _rank = details::multi_grid::thread_rank(_handle); + } + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system) + + _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const { + return (_handle != 0); + } + + _CG_QUALIFIER _CG_DEPRECATED void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + details::multi_grid::sync(_handle); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const { + _CG_ASSERT(is_valid()); + return _size; + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const { + return num_threads(); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const { + _CG_ASSERT(is_valid()); + return _rank; + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const { + _CG_ASSERT(is_valid()); + return (details::multi_grid::grid_rank(_handle)); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const { + _CG_ASSERT(is_valid()); + return (details::multi_grid::num_grids(_handle)); + } +}; +# endif + +/** + * multi_grid_group this_multi_grid() + * + * Constructs a multi_grid_group + */ +# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) +template +__device__ +#else +_CG_QUALIFIER +# endif +_CG_DEPRECATED +multi_grid_group this_multi_grid() +{ + return multi_grid_group(); +} +#endif + +/** + * class grid_group; + * + * Threads within this this group are guaranteed to be co-resident on the + * same device within the same launched kernel. 
To use this group, the kernel + * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent), + * and the device must support it (queryable device attribute). + * + * Constructed via this_grid(); + */ +class grid_group : public thread_group_base +{ + _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id; + friend _CG_QUALIFIER grid_group this_grid(); + +private: + _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) { + _data.grid.gridWs = gridWs; + } + + public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device) + + _CG_QUALIFIER bool is_valid() const { + return (_data.grid.gridWs != NULL); + } + + _CG_QUALIFIER void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + details::grid::sync(&_data.grid.gridWs->barrier); + } + + _CG_STATIC_QUALIFIER unsigned long long size() { + return details::grid::size(); + } + + _CG_STATIC_QUALIFIER unsigned long long thread_rank() { + return details::grid::thread_rank(); + } + + _CG_STATIC_QUALIFIER dim3 group_dim() { + return details::grid::grid_dim(); + } + + _CG_STATIC_QUALIFIER unsigned long long num_threads() { + return details::grid::num_threads(); + } + + _CG_STATIC_QUALIFIER dim3 dim_blocks() { + return details::grid::dim_blocks(); + } + + _CG_STATIC_QUALIFIER unsigned long long num_blocks() { + return details::grid::num_blocks(); + } + + _CG_STATIC_QUALIFIER dim3 block_index() { + return details::grid::block_index(); + } + + _CG_STATIC_QUALIFIER unsigned long long block_rank() { + return details::grid::block_rank(); + } + +# if defined(_CG_HAS_CLUSTER_GROUP) + _CG_STATIC_QUALIFIER dim3 dim_clusters() { + return details::grid::dim_clusters(); + } + + _CG_STATIC_QUALIFIER unsigned long long num_clusters() { + return details::grid::num_clusters(); + } + + _CG_STATIC_QUALIFIER dim3 cluster_index() { + return details::grid::cluster_index(); + } + + _CG_STATIC_QUALIFIER unsigned long long cluster_rank() { + return details::grid::cluster_rank(); + } +# endif +}; + +_CG_QUALIFIER grid_group this_grid() { + // Load a workspace from the driver + grid_group gg(details::get_grid_workspace()); +#ifdef _CG_DEBUG + // *all* threads must be available to synchronize + gg.sync(); +#endif // _CG_DEBUG + return gg; +} + +#if defined(_CG_HAS_CLUSTER_GROUP) +/** + * class cluster_group + * + * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly + * divided along all dimensions to form groups of blocks, each group of which is + * a block cluster. Clustered grids are subject to various restrictions and + * limitations. Primarily, a cluster consists of at most 8 blocks by default + * (although the user is allowed to opt-in to non-standard sizes,) and clustered + * grids are subject to additional occupancy limitations due to per-cluster + * hardware resource consumption. In exchange, a block cluster is guaranteed to + * be a cooperative group, with access to all cooperative group capabilities, as + * well as cluster specific capabilities and accelerations. A cluster_group + * represents a block cluster. 
+ * + * Constructed via this_cluster_group(); + */ +class cluster_group : public thread_group_base +{ + // Friends + friend _CG_QUALIFIER cluster_group this_cluster(); + + // Disable constructor + _CG_QUALIFIER cluster_group() + { + } + + public: + //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster) + + using arrival_token = struct {}; + + // Functionality exposed by the group + _CG_STATIC_QUALIFIER void sync() + { + return details::cluster::sync(); + } + + _CG_STATIC_QUALIFIER arrival_token barrier_arrive() + { + details::cluster::barrier_arrive(); + return arrival_token(); + } + + _CG_STATIC_QUALIFIER void barrier_wait() + { + return details::cluster::barrier_wait(); + } + + _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&) + { + return details::cluster::barrier_wait(); + } + + _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr) + { + return details::cluster::query_shared_rank(addr); + } + + template + _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank) + { + return details::cluster::map_shared_rank(addr, rank); + } + + _CG_STATIC_QUALIFIER dim3 block_index() + { + return details::cluster::block_index(); + } + + _CG_STATIC_QUALIFIER unsigned int block_rank() + { + return details::cluster::block_rank(); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() + { + return details::cluster::thread_rank(); + } + + _CG_STATIC_QUALIFIER dim3 dim_blocks() + { + return details::cluster::dim_blocks(); + } + + _CG_STATIC_QUALIFIER unsigned int num_blocks() + { + return details::cluster::num_blocks(); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() + { + return details::cluster::dim_threads(); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() + { + return details::cluster::num_threads(); + } + + // Legacy aliases + _CG_STATIC_QUALIFIER unsigned int size() + { + return num_threads(); + } +}; + +/* + * cluster_group this_cluster() + * + * Constructs a cluster_group + */ +_CG_QUALIFIER cluster_group this_cluster() +{ + cluster_group cg; +#ifdef _CG_DEBUG + cg.sync(); +#endif + return cg; +} +#endif + +#if defined(_CG_CPP11_FEATURES) +class thread_block; +template +_CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch); +#endif + +/** + * class thread_block + * + * Every GPU kernel is executed by a grid of thread blocks, and threads within + * each block are guaranteed to reside on the same streaming multiprocessor. + * A thread_block represents a thread block whose dimensions are not known until runtime. 
+ * + * Constructed via this_thread_block(); + */ +class thread_block : public thread_group_base +{ + // Friends + friend _CG_QUALIFIER thread_block this_thread_block(); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz); + +#if defined(_CG_CPP11_FEATURES) + template + friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch); + template + friend class __static_size_multi_warp_tile_base; + + details::multi_warp_scratch* const tile_memory; + + template + _CG_QUALIFIER thread_block(block_tile_memory& scratch) : + tile_memory(details::get_scratch_ptr(&scratch)) { +#ifdef _CG_DEBUG + if (num_threads() > MaxBlockSize) { + details::abort(); + } +#endif +#if !defined(_CG_HAS_RESERVED_SHARED) + tile_memory->init_barriers(thread_rank()); + sync(); +#endif + } +#endif + + // Disable constructor + _CG_QUALIFIER thread_block() +#if defined(_CG_CPP11_FEATURES) + : tile_memory(details::get_scratch_ptr(NULL)) +#endif + { } + + // Internal Use + _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const { + const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0); + + // Invalid, immediately fail + if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) { + details::abort(); + return (thread_block()); + } + + unsigned int mask; + unsigned int base_offset = thread_rank() & (~(tilesz - 1)); + unsigned int masklength = min((unsigned int)size() - base_offset, tilesz); + + mask = (unsigned int)(-1) >> (32 - masklength); + mask <<= (details::laneid() & ~(tilesz - 1)); + thread_group tile = thread_group(details::coalesced_group_id); + tile._data.coalesced.mask = mask; + tile._data.coalesced.size = __popc(mask); + tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz; + tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz; + tile._data.coalesced.is_tiled = true; + return (tile); + } + + public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id; + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + _CG_STATIC_QUALIFIER void sync() { + details::cta::sync(); + } + + _CG_STATIC_QUALIFIER unsigned int size() { + return details::cta::size(); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() { + return details::cta::thread_rank(); + } + + // Additional functionality exposed by the group + _CG_STATIC_QUALIFIER dim3 group_index() { + return details::cta::group_index(); + } + + _CG_STATIC_QUALIFIER dim3 thread_index() { + return details::cta::thread_index(); + } + + _CG_STATIC_QUALIFIER dim3 group_dim() { + return details::cta::block_dim(); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() { + return details::cta::dim_threads(); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() { + return details::cta::num_threads(); + } + +}; + +/** + * thread_block this_thread_block() + * + * Constructs a thread_block group + */ +_CG_QUALIFIER thread_block this_thread_block() +{ + return (thread_block()); +} + +#if defined(_CG_CPP11_FEATURES) +template +_CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch) { + return (thread_block(scratch)); +} +#endif + +/** + * class coalesced_group + * + * A group representing the current set of converged threads in a warp. + * The size of the group is not guaranteed and it may return a group of + * only one thread (itself). + * + * This group exposes warp-synchronous builtins. 
+ * Constructed via coalesced_threads(); + */ +class coalesced_group : public thread_group_base +{ +private: + friend _CG_QUALIFIER coalesced_group coalesced_threads(); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz); + friend class details::_coalesced_group_data_access; + + _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const { + unsigned int member_pack = 0; + unsigned int member_rank = 0; + for (int bit_idx = 0; bit_idx < 32; bit_idx++) { + unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx); + if (lane_bit) { + if (laneMask & lane_bit) + member_pack |= 1 << member_rank; + member_rank++; + } + } + return (member_pack); + } + + // Internal Use + _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const { + const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0); + + // Invalid, immediately fail + if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) { + details::abort(); + return (coalesced_group(0)); + } + if (size() <= tilesz) { + return (*this); + } + + if ((_data.coalesced.is_tiled == true) && pow2_tilesz) { + unsigned int base_offset = (thread_rank() & (~(tilesz - 1))); + unsigned int masklength = min((unsigned int)size() - base_offset, tilesz); + unsigned int mask = (unsigned int)(-1) >> (32 - masklength); + + mask <<= (details::laneid() & ~(tilesz - 1)); + coalesced_group coalesced_tile = coalesced_group(mask); + coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz; + coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz; + coalesced_tile._data.coalesced.is_tiled = true; + return (coalesced_tile); + } + else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) { + unsigned int mask = 0; + unsigned int member_rank = 0; + int seen_lanes = (thread_rank() / tilesz) * tilesz; + for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) { + unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx); + if (lane_bit) { + if (seen_lanes <= 0 && member_rank < tilesz) { + mask |= lane_bit; + member_rank++; + } + seen_lanes--; + } + } + coalesced_group coalesced_tile = coalesced_group(mask); + // Override parent with the size of this group + coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz; + coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz; + return coalesced_tile; + } + else { + // None in _CG_VERSION 1000 + details::abort(); + } + + return (coalesced_group(0)); + } + + protected: + _CG_QUALIFIER coalesced_group(unsigned int mask) { + _data.coalesced.mask = mask; + _data.coalesced.size = __popc(mask); + _data.coalesced.metaGroupRank = 0; + _data.coalesced.metaGroupSize = 1; + _data.coalesced.is_tiled = false; + } + + _CG_QUALIFIER unsigned int get_mask() const { + return (_data.coalesced.mask); + } + + public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id; + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + _CG_QUALIFIER unsigned int num_threads() const { + return _data.coalesced.size; + } + + _CG_QUALIFIER unsigned int size() const { + return num_threads(); + } + + _CG_QUALIFIER unsigned int thread_rank() const { + return (__popc(_data.coalesced.mask & details::lanemask32_lt())); + } + + // Rank of this group in the upper level of the hierarchy + _CG_QUALIFIER unsigned int meta_group_rank() const { + return _data.coalesced.metaGroupRank; + } + + // Total num partitions created 
out of all CTAs when the group was created + _CG_QUALIFIER unsigned int meta_group_size() const { + return _data.coalesced.metaGroupSize; + } + + _CG_QUALIFIER void sync() const { + __syncwarp(_data.coalesced.mask); + } + +#ifdef _CG_CPP11_FEATURES + template > + _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const { + unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 : + (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1)); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } + + template > + _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const { + if (size() == 32) { + return details::tile::shuffle_dispatch::shfl_down( + _CG_STL_NAMESPACE::forward(elem), 0xFFFFFFFF, delta, 32); + } + + unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1); + + if (lane >= 32) + lane = details::laneid(); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } + + template > + _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const { + if (size() == 32) { + return details::tile::shuffle_dispatch::shfl_up( + _CG_STL_NAMESPACE::forward(elem), 0xFFFFFFFF, delta, 32); + } + + unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1)); + if (lane >= 32) + lane = details::laneid(); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } +#else + template + _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const { + details::assert_if_not_arithmetic(); + unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 : + (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1)); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } + + template + _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32)); + } + unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1)); + if (lane >= 32) lane = details::laneid(); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } + + template + _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32)); + } + unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1); + if (lane >= 32) lane = details::laneid(); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } +#endif + + _CG_QUALIFIER int any(int predicate) const { + return (__ballot_sync(_data.coalesced.mask, predicate) != 0); + } + _CG_QUALIFIER int all(int predicate) const { + return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask); + } + _CG_QUALIFIER unsigned int ballot(int predicate) const { + if (size() == 32) { + return (__ballot_sync(0xFFFFFFFF, predicate)); + } + unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate); + return (_packLanes(lane_ballot)); + } + +#ifdef _CG_HAS_MATCH_COLLECTIVE + + template + _CG_QUALIFIER unsigned int match_any(TyIntegral val) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__match_any_sync(0xFFFFFFFF, val)); + } + unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val); + return (_packLanes(lane_match)); + } + + template + 
_CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__match_all_sync(0xFFFFFFFF, val, &pred)); + } + unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred); + return (_packLanes(lane_match)); + } + +#endif /* !_CG_HAS_MATCH_COLLECTIVE */ + +}; + +_CG_QUALIFIER coalesced_group coalesced_threads() +{ + return (coalesced_group(__activemask())); +} + +namespace details { + template struct verify_thread_block_tile_size; + template <> struct verify_thread_block_tile_size<32> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<16> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<8> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<4> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<2> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<1> { typedef void OK; }; + +#ifdef _CG_CPP11_FEATURES + template + using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant; + + template + using _is_single_warp = _CG_STL_NAMESPACE::integral_constant; + template + using _is_multi_warp = + _CG_STL_NAMESPACE::integral_constant 32) && (Size <= 1024)>; + + template + using _is_valid_single_warp_tile = + _CG_STL_NAMESPACE::integral_constant::value && _is_single_warp::value>; + template + using _is_valid_multi_warp_tile = + _CG_STL_NAMESPACE::integral_constant::value && _is_multi_warp::value>; +#else + template + struct _is_multi_warp { + static const bool value = false; + }; +#endif +} + +template +class __static_size_tile_base +{ +protected: + _CG_STATIC_CONST_DECL unsigned int numThreads = Size; + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + // Rank of thread within tile + _CG_STATIC_QUALIFIER unsigned int thread_rank() { + return (details::cta::thread_rank() & (numThreads - 1)); + } + + // Number of threads within tile + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() { + return numThreads; + } + + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() { + return num_threads(); + } +}; + +template +class __static_size_thread_block_tile_base : public __static_size_tile_base +{ + friend class details::_coalesced_group_data_access; + typedef details::tile::tile_helpers th; + +#ifdef _CG_CPP11_FEATURES + static_assert(details::_is_valid_single_warp_tile::value, "Size must be one of 1/2/4/8/16/32"); +#else + typedef typename details::verify_thread_block_tile_size::OK valid; +#endif + using __static_size_tile_base::numThreads; + _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF; + + protected: + _CG_STATIC_QUALIFIER unsigned int build_mask() { + unsigned int mask = fullMask; + if (numThreads != 32) { + // [0,31] representing the current active thread in the warp + unsigned int laneId = details::laneid(); + // shift mask according to the partition it belongs to + mask = th::tileMask << (laneId & ~(th::laneMask)); + } + return (mask); + } + +public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id; + + _CG_STATIC_QUALIFIER void sync() { + __syncwarp(build_mask()); + } + +#ifdef _CG_CPP11_FEATURES + // PTX supported collectives + template > + _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const { + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), build_mask(), srcRank, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const { + return 
details::tile::shuffle_dispatch::shfl_down( + _CG_STL_NAMESPACE::forward(elem), build_mask(), delta, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const { + return details::tile::shuffle_dispatch::shfl_up( + _CG_STL_NAMESPACE::forward(elem), build_mask(), delta, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const { + return details::tile::shuffle_dispatch::shfl_xor( + _CG_STL_NAMESPACE::forward(elem), build_mask(), laneMask, numThreads); + } +#else + template + _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const { + details::assert_if_not_arithmetic(); + return (__shfl_sync(build_mask(), var, srcRank, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const { + details::assert_if_not_arithmetic(); + return (__shfl_down_sync(build_mask(), var, delta, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const { + details::assert_if_not_arithmetic(); + return (__shfl_up_sync(build_mask(), var, delta, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const { + details::assert_if_not_arithmetic(); + return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads)); + } +#endif //_CG_CPP11_FEATURES + + _CG_QUALIFIER int any(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot != 0); + } + _CG_QUALIFIER int all(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot == build_mask()); + } + _CG_QUALIFIER unsigned int ballot(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot >> (details::laneid() & (~(th::laneMask)))); + } + +#ifdef _CG_HAS_MATCH_COLLECTIVE + template + _CG_QUALIFIER unsigned int match_any(TyIntegral val) const { + details::assert_if_not_arithmetic(); + unsigned int lane_match = __match_any_sync(build_mask(), val); + return (lane_match >> (details::laneid() & (~(th::laneMask)))); + } + + template + _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const { + details::assert_if_not_arithmetic(); + unsigned int lane_match = __match_all_sync(build_mask(), val, &pred); + return (lane_match >> (details::laneid() & (~(th::laneMask)))); + } +#endif + +}; + +template +class __static_parent_thread_block_tile_base +{ +public: + // Rank of this group in the upper level of the hierarchy + _CG_STATIC_QUALIFIER unsigned int meta_group_rank() { + return ParentT::thread_rank() / Size; + } + + // Total num partitions created out of all CTAs when the group was created + _CG_STATIC_QUALIFIER unsigned int meta_group_size() { + return (ParentT::size() + Size - 1) / Size; + } +}; + +/** + * class thread_block_tile + * + * Statically-sized group type, representing one tile of a thread block. + * The only specializations currently supported are those with native + * hardware support (1/2/4/8/16/32) + * + * This group exposes warp-synchronous builtins. 
+ * Can only be constructed via tiled_partition(ParentT&) + */ + +template +class __single_warp_thread_block_tile : + public __static_size_thread_block_tile_base, + public __static_parent_thread_block_tile_base +{ + typedef __static_parent_thread_block_tile_base staticParentBaseT; + friend class details::_coalesced_group_data_access; + +protected: + _CG_QUALIFIER __single_warp_thread_block_tile() { }; + _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { }; + + _CG_STATIC_QUALIFIER unsigned int get_mask() { + return __static_size_thread_block_tile_base::build_mask(); + } +}; + +template +class __single_warp_thread_block_tile : + public __static_size_thread_block_tile_base, + public thread_group_base +{ + _CG_STATIC_CONST_DECL unsigned int numThreads = Size; + + template friend class __single_warp_thread_block_tile; + friend class details::_coalesced_group_data_access; + + typedef __static_size_thread_block_tile_base staticSizeBaseT; + +protected: + _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) { + _data.coalesced.mask = staticSizeBaseT::build_mask(); + _data.coalesced.size = numThreads; + _data.coalesced.metaGroupRank = meta_group_rank; + _data.coalesced.metaGroupSize = meta_group_size; + _data.coalesced.is_tiled = true; + } + + _CG_QUALIFIER unsigned int get_mask() const { + return (_data.coalesced.mask); + } + +public: + using staticSizeBaseT::sync; + using staticSizeBaseT::size; + using staticSizeBaseT::num_threads; + using staticSizeBaseT::thread_rank; + + _CG_QUALIFIER unsigned int meta_group_rank() const { + return _data.coalesced.metaGroupRank; + } + + _CG_QUALIFIER unsigned int meta_group_size() const { + return _data.coalesced.metaGroupSize; + } +}; + +/** + * Outer level API calls + * void sync(GroupT) - see .sync() + * void thread_rank(GroupT) - see .thread_rank() + * void group_size(GroupT) - see .size() + */ +template +_CG_QUALIFIER void sync(GroupT const &g) +{ + g.sync(); +} + +// TODO: Use a static dispatch to determine appropriate return type +// C++03 is stuck with unsigned long long for now +#ifdef _CG_CPP11_FEATURES +template +_CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) { + return g.thread_rank(); +} + + +template +_CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) { + return g.num_threads(); +} +#else +template +_CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) { + return static_cast(g.thread_rank()); +} + + +template +_CG_QUALIFIER unsigned long long group_size(GroupT const &g) { + return static_cast(g.num_threads()); +} +#endif + + +/** + * tiled_partition + * + * The tiled_partition(parent, tilesz) method is a collective operation that + * partitions the parent group into a one-dimensional, row-major, tiling of subgroups. + * + * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will + * be created where threads having identical k = (thread_rank(parent)/tilesz) + * will be members of the same subgroup. + * + * The implementation may cause the calling thread to wait until all the members + * of the parent group have invoked the operation before resuming execution. + * + * Functionality is limited to power-of-two sized subgorup instances of at most + * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be + * tiled_partition() in _CG_VERSION 1000. 
+ */ +_CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz) +{ + if (parent.get_type() == details::coalesced_group_id) { + const coalesced_group *_cg = static_cast(&parent); + return _cg->_get_tiled_threads(tilesz); + } + else { + const thread_block *_tb = static_cast(&parent); + return _tb->_get_tiled_threads(tilesz); + } +} + +// Thread block type overload: returns a basic thread_group for now (may be specialized later) +_CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz) +{ + return (parent._get_tiled_threads(tilesz)); +} + +// Coalesced group type overload: retains its ability to stay coalesced +_CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz) +{ + return (parent._get_tiled_threads(tilesz)); +} + +namespace details { + template + class internal_thread_block_tile : public __single_warp_thread_block_tile {}; + + template + _CG_QUALIFIER internal_thread_block_tile tiled_partition_internal() { + return internal_thread_block_tile(); + } + + template + _CG_QUALIFIER TyVal multi_warp_collectives_helper( + const GroupT& group, + WarpLambda warp_lambda, + InterWarpLambda inter_warp_lambda) { + return group.template collectives_scheme(warp_lambda, inter_warp_lambda); + } + + template + _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) { + return group.template get_scratch_location(warp_id); + } + + template + _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) { + return group.get_sync_location(); + } + +} +/** + * tiled_partition + * + * The tiled_partition(parent) method is a collective operation that + * partitions the parent group into a one-dimensional, row-major, tiling of subgroups. + * + * A total of ((size(parent)/tilesz) subgroups will be created, + * therefore the parent group size must be evenly divisible by the tilesz. + * The allow parent groups are thread_block or thread_block_tile. + * + * The implementation may cause the calling thread to wait until all the members + * of the parent group have invoked the operation before resuming execution. + * + * Functionality is limited to native hardware sizes, 1/2/4/8/16/32. + * The size(parent) must be greater than the template Size parameter + * otherwise the results are undefined. 
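+ *
+ * Editor's note: illustrative sketch, not part of the original header; \p val is assumed
+ * to be a local integer in a __global__ kernel.
+ * \code
+   namespace cg = cooperative_groups;
+   auto tile32 = cg::tiled_partition<32>(cg::this_thread_block());
+   // Tree reduction across the tile using warp shuffles.
+   for (unsigned int offset = tile32.size() / 2; offset > 0; offset /= 2)
+       val += tile32.shfl_down(val, offset);
+   unsigned int which_tile = tile32.meta_group_rank();   // rank of this tile within the block
+ * \endcode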
+ */ + +#if defined(_CG_CPP11_FEATURES) +template +class __static_size_multi_warp_tile_base : public __static_size_tile_base +{ + static_assert(details::_is_valid_multi_warp_tile::value, "Size must be one of 64/128/256/512"); + + template + friend __device__ TyVal details::multi_warp_collectives_helper( + const GroupT& group, + WarpLambda warp_lambda, + InterWarpLambda inter_warp_lambda); + template + friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id); + template + friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group); + template + friend class __static_size_multi_warp_tile_base; + using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base>; + using ThisType = __static_size_multi_warp_tile_base; + _CG_STATIC_CONST_DECL int numWarps = Size / 32; + +protected: + details::multi_warp_scratch* const tile_memory; + + template + _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) { +#if defined(_CG_HAS_RESERVED_SHARED) + details::sync_warps_reset(get_sync_location(), details::cta::thread_rank()); + g.sync(); +#endif + } + + +private: + _CG_QUALIFIER details::barrier_t* get_sync_location() const { + // Different group sizes use different barriers, all groups of a given size share one barrier. + unsigned int sync_id = details::log2(Size / 64); + return &tile_memory->barriers[sync_id]; + } + + template + _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const { + unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id; + return reinterpret_cast(&tile_memory->communication_memory[scratch_id]); + } + + template + _CG_QUALIFIER T* get_scratch_location() const { + unsigned int scratch_id = details::cta::thread_rank() / 32; + return reinterpret_cast(&tile_memory->communication_memory[scratch_id]); + } + + template + _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const { + unsigned int src_warp = src / 32; + auto warp = details::tiled_partition_internal<32, ThisType>(); + details::barrier_t* sync_location = get_sync_location(); + + // Get warp slot of the source threads warp. + TyVal* warp_scratch_location = get_scratch_location(src_warp); + + if (warp.meta_group_rank() == src_warp) { + warp.sync(); + // Put shuffled value into my warp slot and let my warp arrive at the barrier. + if (thread_rank() == src) { + *warp_scratch_location = val; + } + details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps); + TyVal result = *warp_scratch_location; + details::sync_warps_wait(sync_location, details::cta::thread_rank()); + return result; + } + else { + // Wait for the source warp to arrive on the barrier. 
+ details::sync_warps_wait_for_specific_warp(sync_location, + (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp)); + TyVal result = *warp_scratch_location; + details::sync_warps(sync_location, details::cta::thread_rank(), numWarps); + return result; + } + } + + template + _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const { + static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size, + "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes"); + auto warp = details::tiled_partition_internal<32, ThisType>(); + details::barrier_t* sync_location = get_sync_location(); + TyVal* warp_scratch_location = get_scratch_location(); + + warp_lambda(warp, warp_scratch_location); + + if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) { + auto subwarp = details::tiled_partition_internal(); + if (subwarp.meta_group_rank() == 0) { + TyVal* thread_scratch_location = get_scratch_location(subwarp.thread_rank()); + inter_warp_lambda(subwarp, thread_scratch_location); + } + warp.sync(); + details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps); + } + TyVal result = *warp_scratch_location; + return result; + } + +public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id; + + using __static_size_tile_base::thread_rank; + + template + _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const { + static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size, + "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes"); + return shfl_impl(val, src); + } + + _CG_QUALIFIER void sync() const { + details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps); + } + + _CG_QUALIFIER int any(int predicate) const { + auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) { + *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate); + }; + auto inter_warp_lambda = + [] (details::internal_thread_block_tile& subwarp, int* thread_scratch_location) { + *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location); + }; + return collectives_scheme(warp_lambda, inter_warp_lambda); + } + + _CG_QUALIFIER int all(int predicate) const { + auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) { + *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate); + }; + auto inter_warp_lambda = + [] (details::internal_thread_block_tile& subwarp, int* thread_scratch_location) { + *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location); + }; + return collectives_scheme(warp_lambda, inter_warp_lambda); + } +}; + + +template +class __multi_warp_thread_block_tile : + public __static_size_multi_warp_tile_base, + public __static_parent_thread_block_tile_base +{ + typedef __static_parent_thread_block_tile_base staticParentBaseT; + typedef __static_size_multi_warp_tile_base staticTileBaseT; +protected: + _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) : + __static_size_multi_warp_tile_base(g) {} +}; + +template +class __multi_warp_thread_block_tile : public __static_size_multi_warp_tile_base +{ + const unsigned int metaGroupRank; + const unsigned int metaGroupSize; + +protected: + template + _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile& g) : + 
__static_size_multi_warp_tile_base(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {} + +public: + _CG_QUALIFIER unsigned int meta_group_rank() const { + return metaGroupRank; + } + + _CG_QUALIFIER unsigned int meta_group_size() const { + return metaGroupSize; + } +}; +#endif + +template +class thread_block_tile; + +namespace details { + template + class thread_block_tile_impl; + + template + class thread_block_tile_impl: public __single_warp_thread_block_tile + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl& g) : + __single_warp_thread_block_tile(g.meta_group_rank(), g.meta_group_size()) {} + + _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) : + __single_warp_thread_block_tile() {} + }; + +#if defined(_CG_CPP11_FEATURES) + template + class thread_block_tile_impl : public __multi_warp_thread_block_tile + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) : + __multi_warp_thread_block_tile(g) {} + }; +#else + template + class thread_block_tile_impl + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {} + }; +#endif +} + +template +class thread_block_tile : public details::thread_block_tile_impl::value> +{ + friend _CG_QUALIFIER thread_block_tile<1, void> this_thread(); + +protected: + _CG_QUALIFIER thread_block_tile(const ParentT& g) : + details::thread_block_tile_impl::value>(g) {} + +public: + _CG_QUALIFIER operator thread_block_tile() const { + return thread_block_tile(*this); + } +}; + +template +class thread_block_tile : public details::thread_block_tile_impl::value> +{ + template + friend class thread_block_tile; + +protected: + template + _CG_QUALIFIER thread_block_tile(const thread_block_tile& g) : + details::thread_block_tile_impl::value>(g) {} + +public: + template + _CG_QUALIFIER thread_block_tile(const thread_block_tile& g) : + details::thread_block_tile_impl::value>(g) {} +}; + +namespace details { + template + struct tiled_partition_impl; + + template + struct tiled_partition_impl : public thread_block_tile { + _CG_QUALIFIER tiled_partition_impl(const thread_block& g) : + thread_block_tile(g) {} + }; + + // ParentT = static thread_block_tile specialization + template + struct tiled_partition_impl > : + public thread_block_tile > { +#ifdef _CG_CPP11_FEATURES + static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size"); +#endif + _CG_QUALIFIER tiled_partition_impl(const thread_block_tile& g) : + thread_block_tile >(g) {} + }; + +} + +template +_CG_QUALIFIER thread_block_tile tiled_partition(const ParentT& g) +{ + return details::tiled_partition_impl(g); +} + +/** + * thread_group this_thread() + * + * Constructs a generic thread_group containing only the calling thread + */ +_CG_QUALIFIER thread_block_tile<1, void> this_thread() +{ + // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its + // meta group rank and size set to 0 and 1 respectively. + return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block()); +} + +/** + * .sync() + * + * Executes a barrier across the group + * + * Implements both a compiler fence and an architectural fence to prevent, + * memory reordering around the barrier. 
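+ *
+ * Editor's note: illustrative sketch, not part of the original header; it assumes a
+ * __global__ kernel whose blocks contain at most 256 threads, so the whole block's
+ * ranks fit the staging buffer.
+ * \code
+   cooperative_groups::thread_block g = cooperative_groups::this_thread_block();
+   __shared__ int stage[256];
+   stage[g.thread_rank()] = my_value;
+   g.sync();   // writes above are now visible to every thread in the group
+   int neighbour = stage[(g.thread_rank() + 1) % g.size()];
+ * \endcode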
+ */ +_CG_QUALIFIER void thread_group::sync() const +{ + switch (_data.group.type) { + case details::coalesced_group_id: + cooperative_groups::sync(*static_cast(this)); + break; + case details::thread_block_id: + cooperative_groups::sync(*static_cast(this)); + break; + case details::grid_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#endif + default: + break; + } +} + +/** + * .size() + * + * Returns the total number of threads in the group. + */ +_CG_QUALIFIER unsigned long long thread_group::size() const +{ + unsigned long long size = 0; + switch (_data.group.type) { + case details::coalesced_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; + case details::thread_block_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; + case details::grid_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#endif + default: + break; + } + return size; +} + +/** + * .thread_rank() + * + * Returns the linearized rank of the calling thread along the interval [0, size()). + */ +_CG_QUALIFIER unsigned long long thread_group::thread_rank() const +{ + unsigned long long rank = 0; + switch (_data.group.type) { + case details::coalesced_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; + case details::thread_block_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; + case details::grid_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#endif + default: + break; + } + return rank; +} + +_CG_END_NAMESPACE + +#include +#if (!defined(_MSC_VER) || defined(_WIN64)) +# include +#endif + +# endif /* ! (__cplusplus, __CUDACC__) */ + +#endif /* !_COOPERATIVE_GROUPS_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h new file mode 100644 index 0000000000000000000000000000000000000000..7b167111b0b387a5279da6749d946560e1c42c1b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h @@ -0,0 +1,348 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(CU_COMPLEX_H_) +#define CU_COMPLEX_H_ + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#endif +#endif + +/* When trying to include C header file in C++ Code extern "C" is required + * But the Standard QNX headers already have ifdef extern in them when compiling C++ Code + * extern "C" cannot be nested + * Hence keep the header out of extern "C" block + */ + +#if !defined(__CUDACC__) +#include /* import fabsf, sqrt */ +#endif /* !defined(__CUDACC__) */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +#include "vector_types.h" + +typedef float2 cuFloatComplex; + +__host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x) +{ + return x.x; +} + +__host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x) +{ + return x.y; +} + +__host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex + (float r, float i) +{ + cuFloatComplex res; + res.x = r; + res.y = i; + return res; +} + +__host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x) +{ + return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x)); +} +__host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x, + cuFloatComplex y) +{ + return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y), + cuCimagf(x) + cuCimagf(y)); +} + +__host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x, + cuFloatComplex y) +{ + return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y), + cuCimagf(x) - cuCimagf(y)); +} + +/* This implementation could suffer from intermediate overflow even though + * the final result would be in range. However, various implementations do + * not guard against this (presumably to avoid losing performance), so we + * don't do it either to stay competitive. + */ +__host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x, + cuFloatComplex y) +{ + cuFloatComplex prod; + prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) - + (cuCimagf(x) * cuCimagf(y)), + (cuCrealf(x) * cuCimagf(y)) + + (cuCimagf(x) * cuCrealf(y))); + return prod; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Such guarded implementations are usually the default for + * complex library implementations, with some also offering an unguarded, + * faster version. + */ +__host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x, + cuFloatComplex y) +{ + cuFloatComplex quot; + float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y)); + float oos = 1.0f / s; + float ars = cuCrealf(x) * oos; + float ais = cuCimagf(x) * oos; + float brs = cuCrealf(y) * oos; + float bis = cuCimagf(y) * oos; + s = (brs * brs) + (bis * bis); + oos = 1.0f / s; + quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos, + ((ais * brs) - (ars * bis)) * oos); + return quot; +} + +/* + * We would like to call hypotf(), but it's not available on all platforms. + * This discrete implementation guards against intermediate underflow and + * overflow by scaling. Otherwise we would lose half the exponent range. + * There are various ways of doing guarded computation. For now chose the + * simplest and fastest solution, however this may suffer from inaccuracies + * if sqrt and division are not IEEE compliant. 
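+ *
+ * Editor's note (illustrative, not part of the original header): with this scheme
+ * cuCabsf(make_cuFloatComplex(3.0f, 4.0f)) evaluates to 5.0f, and an input such as
+ * make_cuFloatComplex(2.0e38f, 2.0e38f) yields roughly 2.83e38f, whereas the naive
+ * sqrtf(a*a + b*b) would overflow its intermediate products to infinity.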
+ */ +__host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x) +{ + float a = cuCrealf(x); + float b = cuCimagf(x); + float v, w, t; + a = fabsf(a); + b = fabsf(b); + if (a > b) { + v = a; + w = b; + } else { + v = b; + w = a; + } + t = w / v; + t = 1.0f + t * t; + t = v * sqrtf(t); + if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) { + t = v + w; + } + return t; +} + +/* Double precision */ +typedef double2 cuDoubleComplex; + +__host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x) +{ + return x.x; +} + +__host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x) +{ + return x.y; +} + +__host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex + (double r, double i) +{ + cuDoubleComplex res; + res.x = r; + res.y = i; + return res; +} + +__host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x) +{ + return make_cuDoubleComplex (cuCreal(x), -cuCimag(x)); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x, + cuDoubleComplex y) +{ + return make_cuDoubleComplex (cuCreal(x) + cuCreal(y), + cuCimag(x) + cuCimag(y)); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x, + cuDoubleComplex y) +{ + return make_cuDoubleComplex (cuCreal(x) - cuCreal(y), + cuCimag(x) - cuCimag(y)); +} + +/* This implementation could suffer from intermediate overflow even though + * the final result would be in range. However, various implementations do + * not guard against this (presumably to avoid losing performance), so we + * don't do it either to stay competitive. + */ +__host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x, + cuDoubleComplex y) +{ + cuDoubleComplex prod; + prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) - + (cuCimag(x) * cuCimag(y)), + (cuCreal(x) * cuCimag(y)) + + (cuCimag(x) * cuCreal(y))); + return prod; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Such guarded implementations are usually the default for + * complex library implementations, with some also offering an unguarded, + * faster version. + */ +__host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x, + cuDoubleComplex y) +{ + cuDoubleComplex quot; + double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y))); + double oos = 1.0 / s; + double ars = cuCreal(x) * oos; + double ais = cuCimag(x) * oos; + double brs = cuCreal(y) * oos; + double bis = cuCimag(y) * oos; + s = (brs * brs) + (bis * bis); + oos = 1.0 / s; + quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos, + ((ais * brs) - (ars * bis)) * oos); + return quot; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Otherwise we would lose half the exponent range. There are + * various ways of doing guarded computation. For now chose the simplest + * and fastest solution, however this may suffer from inaccuracies if sqrt + * and division are not IEEE compliant. 
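+ *
+ * Editor's note (illustrative, not part of the original header): for example, an input
+ * of make_cuDoubleComplex(1.0e300, 1.0e300) yields roughly 1.414e300 here, while the
+ * unscaled sqrt(a*a + b*b) would overflow, since 1.0e600 is not representable in double.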
+ */ +__host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x) +{ + double a = cuCreal(x); + double b = cuCimag(x); + double v, w, t; + a = fabs(a); + b = fabs(b); + if (a > b) { + v = a; + w = b; + } else { + v = b; + w = a; + } + t = w / v; + t = 1.0 + t * t; + t = v * sqrt(t); + if ((v == 0.0) || + (v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) { + t = v + w; + } + return t; +} + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +/* aliases */ +typedef cuFloatComplex cuComplex; +__host__ __device__ static __inline__ cuComplex make_cuComplex (float x, + float y) +{ + return make_cuFloatComplex (x, y); +} + +/* float-to-double promotion */ +__host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble + (cuFloatComplex c) +{ + return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c)); +} + +__host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat +(cuDoubleComplex c) +{ + return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c)); +} + + +__host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d) +{ + float real_res; + float imag_res; + + real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d); + imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d); + + real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res; + imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res; + + return make_cuComplex(real_res, imag_res); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d) +{ + double real_res; + double imag_res; + + real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d); + imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d); + + real_res = -(cuCimag(x) * cuCimag(y)) + real_res; + imag_res = (cuCimag(x) * cuCreal(y)) + imag_res; + + return make_cuDoubleComplex(real_res, imag_res); +} + +#endif /* !defined(CU_COMPLEX_H_) */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..fc733fdeacf706b0180bd0f9e915ef50eae1aba3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h @@ -0,0 +1,22119 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef __cuda_cuda_h__ +#define __cuda_cuda_h__ + + + + +#include +#ifdef _MSC_VER +typedef unsigned __int32 cuuint32_t; +typedef unsigned __int64 cuuint64_t; +#else +#include +typedef uint32_t cuuint32_t; +typedef uint64_t cuuint64_t; +#endif + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif + +#if defined(CUDA_FORCE_API_VERSION) +#error "CUDA_FORCE_API_VERSION is no longer supported." 
+#endif + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __CUDA_API_PER_THREAD_DEFAULT_STREAM + #define __CUDA_API_PTDS(api) api ## _ptds + #define __CUDA_API_PTSZ(api) api ## _ptsz +#else + #define __CUDA_API_PTDS(api) api + #define __CUDA_API_PTSZ(api) api +#endif + +#define cuDeviceTotalMem cuDeviceTotalMem_v2 +#define cuCtxCreate cuCtxCreate_v2 +#define cuCtxCreate_v3 cuCtxCreate_v3 +#define cuModuleGetGlobal cuModuleGetGlobal_v2 +#define cuMemGetInfo cuMemGetInfo_v2 +#define cuMemAlloc cuMemAlloc_v2 +#define cuMemAllocPitch cuMemAllocPitch_v2 +#define cuMemFree cuMemFree_v2 +#define cuMemGetAddressRange cuMemGetAddressRange_v2 +#define cuMemAllocHost cuMemAllocHost_v2 +#define cuMemHostGetDevicePointer cuMemHostGetDevicePointer_v2 +#define cuMemcpyHtoD __CUDA_API_PTDS(cuMemcpyHtoD_v2) +#define cuMemcpyDtoH __CUDA_API_PTDS(cuMemcpyDtoH_v2) +#define cuMemcpyDtoD __CUDA_API_PTDS(cuMemcpyDtoD_v2) +#define cuMemcpyDtoA __CUDA_API_PTDS(cuMemcpyDtoA_v2) +#define cuMemcpyAtoD __CUDA_API_PTDS(cuMemcpyAtoD_v2) +#define cuMemcpyHtoA __CUDA_API_PTDS(cuMemcpyHtoA_v2) +#define cuMemcpyAtoH __CUDA_API_PTDS(cuMemcpyAtoH_v2) +#define cuMemcpyAtoA __CUDA_API_PTDS(cuMemcpyAtoA_v2) +#define cuMemcpyHtoAAsync __CUDA_API_PTSZ(cuMemcpyHtoAAsync_v2) +#define cuMemcpyAtoHAsync __CUDA_API_PTSZ(cuMemcpyAtoHAsync_v2) +#define cuMemcpy2D __CUDA_API_PTDS(cuMemcpy2D_v2) +#define cuMemcpy2DUnaligned __CUDA_API_PTDS(cuMemcpy2DUnaligned_v2) +#define cuMemcpy3D __CUDA_API_PTDS(cuMemcpy3D_v2) +#define cuMemcpyHtoDAsync __CUDA_API_PTSZ(cuMemcpyHtoDAsync_v2) +#define cuMemcpyDtoHAsync __CUDA_API_PTSZ(cuMemcpyDtoHAsync_v2) +#define cuMemcpyDtoDAsync __CUDA_API_PTSZ(cuMemcpyDtoDAsync_v2) +#define cuMemcpy2DAsync __CUDA_API_PTSZ(cuMemcpy2DAsync_v2) +#define cuMemcpy3DAsync __CUDA_API_PTSZ(cuMemcpy3DAsync_v2) +#define cuMemsetD8 __CUDA_API_PTDS(cuMemsetD8_v2) +#define cuMemsetD16 __CUDA_API_PTDS(cuMemsetD16_v2) +#define cuMemsetD32 __CUDA_API_PTDS(cuMemsetD32_v2) +#define cuMemsetD2D8 __CUDA_API_PTDS(cuMemsetD2D8_v2) +#define cuMemsetD2D16 __CUDA_API_PTDS(cuMemsetD2D16_v2) +#define cuMemsetD2D32 __CUDA_API_PTDS(cuMemsetD2D32_v2) +#define cuArrayCreate cuArrayCreate_v2 +#define cuArrayGetDescriptor cuArrayGetDescriptor_v2 +#define cuArray3DCreate cuArray3DCreate_v2 +#define cuArray3DGetDescriptor cuArray3DGetDescriptor_v2 +#define cuTexRefSetAddress cuTexRefSetAddress_v2 +#define cuTexRefGetAddress cuTexRefGetAddress_v2 +#define cuGraphicsResourceGetMappedPointer cuGraphicsResourceGetMappedPointer_v2 +#define cuCtxDestroy cuCtxDestroy_v2 +#define cuCtxPopCurrent cuCtxPopCurrent_v2 +#define cuCtxPushCurrent cuCtxPushCurrent_v2 +#define cuStreamDestroy cuStreamDestroy_v2 +#define cuEventDestroy cuEventDestroy_v2 +#define cuTexRefSetAddress2D cuTexRefSetAddress2D_v3 +#define cuLinkCreate cuLinkCreate_v2 +#define cuLinkAddData cuLinkAddData_v2 +#define cuLinkAddFile cuLinkAddFile_v2 +#define cuMemHostRegister cuMemHostRegister_v2 +#define cuGraphicsResourceSetMapFlags cuGraphicsResourceSetMapFlags_v2 +#define cuStreamBeginCapture __CUDA_API_PTSZ(cuStreamBeginCapture_v2) +#define cuDevicePrimaryCtxRelease cuDevicePrimaryCtxRelease_v2 +#define cuDevicePrimaryCtxReset cuDevicePrimaryCtxReset_v2 +#define cuDevicePrimaryCtxSetFlags cuDevicePrimaryCtxSetFlags_v2 +#define cuDeviceGetUuid_v2 cuDeviceGetUuid_v2 +#define cuIpcOpenMemHandle cuIpcOpenMemHandle_v2 + +#define cuGraphInstantiate cuGraphInstantiateWithFlags + +#define cuGraphExecUpdate cuGraphExecUpdate_v2 +#define 
cuGetProcAddress cuGetProcAddress_v2 +#define cuGraphAddKernelNode cuGraphAddKernelNode_v2 +#define cuGraphKernelNodeGetParams cuGraphKernelNodeGetParams_v2 +#define cuGraphKernelNodeSetParams cuGraphKernelNodeSetParams_v2 +#define cuGraphExecKernelNodeSetParams cuGraphExecKernelNodeSetParams_v2 + +#define cuStreamWriteValue32 __CUDA_API_PTSZ(cuStreamWriteValue32_v2) +#define cuStreamWaitValue32 __CUDA_API_PTSZ(cuStreamWaitValue32_v2) +#define cuStreamWriteValue64 __CUDA_API_PTSZ(cuStreamWriteValue64_v2) +#define cuStreamWaitValue64 __CUDA_API_PTSZ(cuStreamWaitValue64_v2) +#define cuStreamBatchMemOp __CUDA_API_PTSZ(cuStreamBatchMemOp_v2) +#define cuStreamGetCaptureInfo __CUDA_API_PTSZ(cuStreamGetCaptureInfo_v2) +#define cuStreamGetCaptureInfo_v2 __CUDA_API_PTSZ(cuStreamGetCaptureInfo_v2) + +#if defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define cuMemcpy __CUDA_API_PTDS(cuMemcpy) + #define cuMemcpyAsync __CUDA_API_PTSZ(cuMemcpyAsync) + #define cuMemcpyPeer __CUDA_API_PTDS(cuMemcpyPeer) + #define cuMemcpyPeerAsync __CUDA_API_PTSZ(cuMemcpyPeerAsync) + #define cuMemcpy3DPeer __CUDA_API_PTDS(cuMemcpy3DPeer) + #define cuMemcpy3DPeerAsync __CUDA_API_PTSZ(cuMemcpy3DPeerAsync) + #define cuMemPrefetchAsync __CUDA_API_PTSZ(cuMemPrefetchAsync) + + #define cuMemsetD8Async __CUDA_API_PTSZ(cuMemsetD8Async) + #define cuMemsetD16Async __CUDA_API_PTSZ(cuMemsetD16Async) + #define cuMemsetD32Async __CUDA_API_PTSZ(cuMemsetD32Async) + #define cuMemsetD2D8Async __CUDA_API_PTSZ(cuMemsetD2D8Async) + #define cuMemsetD2D16Async __CUDA_API_PTSZ(cuMemsetD2D16Async) + #define cuMemsetD2D32Async __CUDA_API_PTSZ(cuMemsetD2D32Async) + + #define cuStreamGetPriority __CUDA_API_PTSZ(cuStreamGetPriority) + #define cuStreamGetId __CUDA_API_PTSZ(cuStreamGetId) + #define cuStreamGetFlags __CUDA_API_PTSZ(cuStreamGetFlags) + #define cuStreamGetCtx __CUDA_API_PTSZ(cuStreamGetCtx) + #define cuStreamWaitEvent __CUDA_API_PTSZ(cuStreamWaitEvent) + #define cuStreamEndCapture __CUDA_API_PTSZ(cuStreamEndCapture) + #define cuStreamIsCapturing __CUDA_API_PTSZ(cuStreamIsCapturing) + #define cuStreamUpdateCaptureDependencies __CUDA_API_PTSZ(cuStreamUpdateCaptureDependencies) + #define cuStreamAddCallback __CUDA_API_PTSZ(cuStreamAddCallback) + #define cuStreamAttachMemAsync __CUDA_API_PTSZ(cuStreamAttachMemAsync) + #define cuStreamQuery __CUDA_API_PTSZ(cuStreamQuery) + #define cuStreamSynchronize __CUDA_API_PTSZ(cuStreamSynchronize) + #define cuEventRecord __CUDA_API_PTSZ(cuEventRecord) + #define cuEventRecordWithFlags __CUDA_API_PTSZ(cuEventRecordWithFlags) + #define cuLaunchKernel __CUDA_API_PTSZ(cuLaunchKernel) + #define cuLaunchKernelEx __CUDA_API_PTSZ(cuLaunchKernelEx) + #define cuLaunchHostFunc __CUDA_API_PTSZ(cuLaunchHostFunc) + #define cuGraphicsMapResources __CUDA_API_PTSZ(cuGraphicsMapResources) + #define cuGraphicsUnmapResources __CUDA_API_PTSZ(cuGraphicsUnmapResources) + + #define cuLaunchCooperativeKernel __CUDA_API_PTSZ(cuLaunchCooperativeKernel) + + #define cuSignalExternalSemaphoresAsync __CUDA_API_PTSZ(cuSignalExternalSemaphoresAsync) + #define cuWaitExternalSemaphoresAsync __CUDA_API_PTSZ(cuWaitExternalSemaphoresAsync) + + #define cuGraphInstantiateWithParams __CUDA_API_PTSZ(cuGraphInstantiateWithParams) + #define cuGraphUpload __CUDA_API_PTSZ(cuGraphUpload) + #define cuGraphLaunch __CUDA_API_PTSZ(cuGraphLaunch) + #define cuStreamCopyAttributes __CUDA_API_PTSZ(cuStreamCopyAttributes) + #define cuStreamGetAttribute __CUDA_API_PTSZ(cuStreamGetAttribute) + #define cuStreamSetAttribute __CUDA_API_PTSZ(cuStreamSetAttribute) + 
#define cuMemMapArrayAsync __CUDA_API_PTSZ(cuMemMapArrayAsync) + + #define cuMemFreeAsync __CUDA_API_PTSZ(cuMemFreeAsync) + #define cuMemAllocAsync __CUDA_API_PTSZ(cuMemAllocAsync) + #define cuMemAllocFromPoolAsync __CUDA_API_PTSZ(cuMemAllocFromPoolAsync) +#endif + +/** + * \file cuda.h + * \brief Header file for the CUDA Toolkit application programming interface. + * + * \file cudaGL.h + * \brief Header file for the OpenGL interoperability functions of the + * low-level CUDA driver application programming interface. + * + * \file cudaD3D9.h + * \brief Header file for the Direct3D 9 interoperability functions of the + * low-level CUDA driver application programming interface. + */ + +/** + * \defgroup CUDA_TYPES Data types used by CUDA driver + * @{ + */ + +/** + * CUDA API version number + */ +#define CUDA_VERSION 12010 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * CUDA device pointer + * CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform. + */ +#if defined(_WIN64) || defined(__LP64__) +typedef unsigned long long CUdeviceptr_v2; +#else +typedef unsigned int CUdeviceptr_v2; +#endif +typedef CUdeviceptr_v2 CUdeviceptr; /**< CUDA device pointer */ + +typedef int CUdevice_v1; /**< CUDA device */ +typedef CUdevice_v1 CUdevice; /**< CUDA device */ +typedef struct CUctx_st *CUcontext; /**< CUDA context */ +typedef struct CUmod_st *CUmodule; /**< CUDA module */ +typedef struct CUfunc_st *CUfunction; /**< CUDA function */ +typedef struct CUlib_st *CUlibrary; /**< CUDA library */ +typedef struct CUkern_st *CUkernel; /**< CUDA kernel */ +typedef struct CUarray_st *CUarray; /**< CUDA array */ +typedef struct CUmipmappedArray_st *CUmipmappedArray; /**< CUDA mipmapped array */ +typedef struct CUtexref_st *CUtexref; /**< CUDA texture reference */ +typedef struct CUsurfref_st *CUsurfref; /**< CUDA surface reference */ +typedef struct CUevent_st *CUevent; /**< CUDA event */ +typedef struct CUstream_st *CUstream; /**< CUDA stream */ +typedef struct CUgraphicsResource_st *CUgraphicsResource; /**< CUDA graphics interop resource */ +typedef unsigned long long CUtexObject_v1; /**< An opaque value that represents a CUDA texture object */ +typedef CUtexObject_v1 CUtexObject; /**< An opaque value that represents a CUDA texture object */ +typedef unsigned long long CUsurfObject_v1; /**< An opaque value that represents a CUDA surface object */ +typedef CUsurfObject_v1 CUsurfObject; /**< An opaque value that represents a CUDA surface object */ +typedef struct CUextMemory_st *CUexternalMemory; /**< CUDA external memory */ +typedef struct CUextSemaphore_st *CUexternalSemaphore; /**< CUDA external semaphore */ +typedef struct CUgraph_st *CUgraph; /**< CUDA graph */ +typedef struct CUgraphNode_st *CUgraphNode; /**< CUDA graph node */ +typedef struct CUgraphExec_st *CUgraphExec; /**< CUDA executable graph */ +typedef struct CUmemPoolHandle_st *CUmemoryPool; /**< CUDA memory pool */ +typedef struct CUuserObject_st *CUuserObject; /**< CUDA user object for graphs */ + +#ifndef CU_UUID_HAS_BEEN_DEFINED +#define CU_UUID_HAS_BEEN_DEFINED +typedef struct CUuuid_st { /**< CUDA definition of UUID */ + char bytes[16]; +} CUuuid; +#endif + +/** + * CUDA IPC handle size + */ +#define CU_IPC_HANDLE_SIZE 64 + +/** + * CUDA IPC event handle + */ +typedef struct CUipcEventHandle_st { + char reserved[CU_IPC_HANDLE_SIZE]; +} CUipcEventHandle_v1; +typedef CUipcEventHandle_v1 CUipcEventHandle; + +/** + * CUDA IPC mem handle + */ +typedef struct CUipcMemHandle_st { + 
char reserved[CU_IPC_HANDLE_SIZE]; +} CUipcMemHandle_v1; +typedef CUipcMemHandle_v1 CUipcMemHandle; + +/** + * CUDA Ipc Mem Flags + */ +typedef enum CUipcMem_flags_enum { + CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = 0x1 /**< Automatically enable peer access between remote devices as needed */ +} CUipcMem_flags; + + +/** + * CUDA Mem Attach Flags + */ +typedef enum CUmemAttach_flags_enum { + CU_MEM_ATTACH_GLOBAL = 0x1, /**< Memory can be accessed by any stream on any device */ + CU_MEM_ATTACH_HOST = 0x2, /**< Memory cannot be accessed by any stream on any device */ + CU_MEM_ATTACH_SINGLE = 0x4 /**< Memory can only be accessed by a single stream on the associated device */ +} CUmemAttach_flags; + +/** + * Context creation flags + */ +typedef enum CUctx_flags_enum { + CU_CTX_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + CU_CTX_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + CU_CTX_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + CU_CTX_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ + CU_CTX_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling + * \deprecated This flag was deprecated as of CUDA 4.0 + * and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. */ + CU_CTX_SCHED_MASK = 0x07, + CU_CTX_MAP_HOST = 0x08, /**< \deprecated This flag was deprecated as of CUDA 11.0 + * and it no longer has any effect. All contexts + * as of CUDA 3.2 behave as though the flag is enabled. */ + CU_CTX_LMEM_RESIZE_TO_MAX = 0x10, /**< Keep local memory allocation after launch */ + CU_CTX_COREDUMP_ENABLE = 0x20, /**< Trigger coredumps from exceptions in this context */ + CU_CTX_USER_COREDUMP_ENABLE= 0x40, /**< Enable user pipe to trigger coredumps in this context */ + CU_CTX_SYNC_MEMOPS = 0x80, /**< Force synchronous blocking on cudaMemcpy/cudaMemset */ + CU_CTX_FLAGS_MASK = 0xFF +} CUctx_flags; + +/** + * Event sched flags + */ +typedef enum CUevent_sched_flags_enum { + CU_EVENT_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + CU_EVENT_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + CU_EVENT_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + CU_EVENT_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ +} CUevent_sched_flags; + +/** + * NVCL event scheduling flags + */ +typedef enum cl_event_flags_enum { + NVCL_EVENT_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + NVCL_EVENT_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + NVCL_EVENT_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + NVCL_EVENT_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ +} cl_event_flags; + +/** + * NVCL context scheduling flags + */ +typedef enum cl_context_flags_enum { + NVCL_CTX_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + NVCL_CTX_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + NVCL_CTX_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + NVCL_CTX_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ +} cl_context_flags; + + +/** + * Stream creation flags + */ +typedef enum CUstream_flags_enum { + CU_STREAM_DEFAULT = 0x0, /**< Default stream flag */ + CU_STREAM_NON_BLOCKING = 0x1 /**< Stream does not synchronize with stream 0 (the NULL stream) */ +} CUstream_flags; + +/** + * Legacy stream handle + * + * Stream handle that can be passed as a CUstream to use an implicit stream + * with legacy synchronization behavior. 
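+ *
+ * Editor's note: illustrative sketch, not part of the original header; \p dptr, \p sptr
+ * and \p numBytes are placeholders for the caller's device pointers and copy size.
+ * \code
+   cuMemcpyAsync(dptr, sptr, numBytes, CU_STREAM_LEGACY);
+ * \endcode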
+ * + * See details of the \link_sync_behavior + */ +#define CU_STREAM_LEGACY ((CUstream)0x1) + +/** + * Per-thread stream handle + * + * Stream handle that can be passed as a CUstream to use an implicit stream + * with per-thread synchronization behavior. + * + * See details of the \link_sync_behavior + */ +#define CU_STREAM_PER_THREAD ((CUstream)0x2) + +/** + * Event creation flags + */ +typedef enum CUevent_flags_enum { + CU_EVENT_DEFAULT = 0x0, /**< Default event flag */ + CU_EVENT_BLOCKING_SYNC = 0x1, /**< Event uses blocking synchronization */ + CU_EVENT_DISABLE_TIMING = 0x2, /**< Event will not record timing data */ + CU_EVENT_INTERPROCESS = 0x4 /**< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set */ +} CUevent_flags; + +/** + * Event record flags + */ +typedef enum CUevent_record_flags_enum { + CU_EVENT_RECORD_DEFAULT = 0x0, /**< Default event record flag */ + CU_EVENT_RECORD_EXTERNAL = 0x1 /**< When using stream capture, create an event record node + * instead of the default behavior. This flag is invalid + * when used outside of capture. */ +} CUevent_record_flags; + +/** + * Event wait flags + */ +typedef enum CUevent_wait_flags_enum { + CU_EVENT_WAIT_DEFAULT = 0x0, /**< Default event wait flag */ + CU_EVENT_WAIT_EXTERNAL = 0x1 /**< When using stream capture, create an event wait node + * instead of the default behavior. This flag is invalid + * when used outside of capture.*/ +} CUevent_wait_flags; + +/** + * Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64 + */ +typedef enum CUstreamWaitValue_flags_enum { + CU_STREAM_WAIT_VALUE_GEQ = 0x0, /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit + values). Note this is a cyclic comparison which ignores wraparound. + (Default behavior.) */ + CU_STREAM_WAIT_VALUE_EQ = 0x1, /**< Wait until *addr == value. */ + CU_STREAM_WAIT_VALUE_AND = 0x2, /**< Wait until (*addr & value) != 0. */ + CU_STREAM_WAIT_VALUE_NOR = 0x3, /**< Wait until ~(*addr | value) != 0. Support for this operation can be + queried with ::cuDeviceGetAttribute() and + ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/ + CU_STREAM_WAIT_VALUE_FLUSH = 1<<30 /**< Follow the wait operation with a flush of outstanding remote writes. This + means that, if a remote write operation is guaranteed to have reached the + device before the wait can be satisfied, that write is guaranteed to be + visible to downstream device work. The device is permitted to reorder + remote writes internally. For example, this flag would be required if + two remote writes arrive in a defined order, the wait is satisfied by the + second write, and downstream work needs to observe the first write. + Support for this operation is restricted to selected platforms and can be + queried with ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.*/ +} CUstreamWaitValue_flags; + +/** + * Flags for ::cuStreamWriteValue32 + */ +typedef enum CUstreamWriteValue_flags_enum { + CU_STREAM_WRITE_VALUE_DEFAULT = 0x0, /**< Default behavior */ + CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = 0x1 /**< Permits the write to be reordered with writes which were issued + before it, as a performance optimization. Normally, + ::cuStreamWriteValue32 will provide a memory fence before the + write, which has similar semantics to + __threadfence_system() but is scoped to the stream + rather than a CUDA thread. + This flag is not supported in the v2 API. 
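+                                                     Editor's note (illustrative, not part of
+                                                     the original header): the usual call
+                                                     pattern for a stream-ordered 32-bit write,
+                                                     with hStream and devPtr as placeholders, is
+                                                     cuStreamWriteValue32(hStream, devPtr, 1,
+                                                     CU_STREAM_WRITE_VALUE_DEFAULT);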
*/ +} CUstreamWriteValue_flags; + +/** + * Operations for ::cuStreamBatchMemOp + */ +typedef enum CUstreamBatchMemOpType_enum { + CU_STREAM_MEM_OP_WAIT_VALUE_32 = 1, /**< Represents a ::cuStreamWaitValue32 operation */ + CU_STREAM_MEM_OP_WRITE_VALUE_32 = 2, /**< Represents a ::cuStreamWriteValue32 operation */ + CU_STREAM_MEM_OP_WAIT_VALUE_64 = 4, /**< Represents a ::cuStreamWaitValue64 operation */ + CU_STREAM_MEM_OP_WRITE_VALUE_64 = 5, /**< Represents a ::cuStreamWriteValue64 operation */ + CU_STREAM_MEM_OP_BARRIER = 6, /**< Insert a memory barrier of the specified type */ + CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = 3 /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a + standalone operation. */ +} CUstreamBatchMemOpType; + +/** + * Flags for ::cuStreamMemoryBarrier + */ +typedef enum CUstreamMemoryBarrier_flags_enum { + CU_STREAM_MEMORY_BARRIER_TYPE_SYS = 0x0, /**< System-wide memory barrier. */ + CU_STREAM_MEMORY_BARRIER_TYPE_GPU = 0x1 /**< Limit memory barrier scope to the GPU. */ +} CUstreamMemoryBarrier_flags; + +/** + * Per-operation parameters for ::cuStreamBatchMemOp + */ +typedef union CUstreamBatchMemOpParams_union { + CUstreamBatchMemOpType operation; + struct CUstreamMemOpWaitValueParams_st { + CUstreamBatchMemOpType operation; + CUdeviceptr address; + union { + cuuint32_t value; + cuuint64_t value64; + }; + unsigned int flags; + CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */ + } waitValue; + struct CUstreamMemOpWriteValueParams_st { + CUstreamBatchMemOpType operation; + CUdeviceptr address; + union { + cuuint32_t value; + cuuint64_t value64; + }; + unsigned int flags; + CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */ + } writeValue; + struct CUstreamMemOpFlushRemoteWritesParams_st { + CUstreamBatchMemOpType operation; + unsigned int flags; + } flushRemoteWrites; + struct CUstreamMemOpMemoryBarrierParams_st { /**< Only supported in the _v2 API */ + CUstreamBatchMemOpType operation; + unsigned int flags; + } memoryBarrier; + cuuint64_t pad[6]; +} CUstreamBatchMemOpParams_v1; +typedef CUstreamBatchMemOpParams_v1 CUstreamBatchMemOpParams; + +typedef struct CUDA_BATCH_MEM_OP_NODE_PARAMS_st { + CUcontext ctx; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} CUDA_BATCH_MEM_OP_NODE_PARAMS; + +/** + * Occupancy calculator flag + */ +typedef enum CUoccupancy_flags_enum { + CU_OCCUPANCY_DEFAULT = 0x0, /**< Default behavior */ + CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = 0x1 /**< Assume global caching is enabled and cannot be automatically turned off */ +} CUoccupancy_flags; + +/** + * Flags for ::cuStreamUpdateCaptureDependencies + */ +typedef enum CUstreamUpdateCaptureDependencies_flags_enum { + CU_STREAM_ADD_CAPTURE_DEPENDENCIES = 0x0, /**< Add new nodes to the dependency set */ + CU_STREAM_SET_CAPTURE_DEPENDENCIES = 0x1 /**< Replace the dependency set with the new nodes */ +} CUstreamUpdateCaptureDependencies_flags; + +/** + * Array formats + */ +typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, /**< Unsigned 8-bit integers */ + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, /**< Unsigned 16-bit integers */ + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, /**< Unsigned 32-bit integers */ + CU_AD_FORMAT_SIGNED_INT8 = 0x08, /**< Signed 8-bit integers */ + CU_AD_FORMAT_SIGNED_INT16 = 0x09, /**< Signed 16-bit integers */ + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, /**< Signed 32-bit integers */ + CU_AD_FORMAT_HALF = 0x10, /**< 16-bit floating point */ + CU_AD_FORMAT_FLOAT = 
0x20, /**< 32-bit floating point */ + CU_AD_FORMAT_NV12 = 0xb0, /**< 8-bit YUV planar format, with 4:2:0 sampling */ + CU_AD_FORMAT_UNORM_INT8X1 = 0xc0, /**< 1 channel unsigned 8-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT8X2 = 0xc1, /**< 2 channel unsigned 8-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT8X4 = 0xc2, /**< 4 channel unsigned 8-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT16X1 = 0xc3, /**< 1 channel unsigned 16-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT16X2 = 0xc4, /**< 2 channel unsigned 16-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT16X4 = 0xc5, /**< 4 channel unsigned 16-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT8X1 = 0xc6, /**< 1 channel signed 8-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT8X2 = 0xc7, /**< 2 channel signed 8-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT8X4 = 0xc8, /**< 4 channel signed 8-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT16X1 = 0xc9, /**< 1 channel signed 16-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT16X2 = 0xca, /**< 2 channel signed 16-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT16X4 = 0xcb, /**< 4 channel signed 16-bit normalized integer */ + CU_AD_FORMAT_BC1_UNORM = 0x91, /**< 4 channel unsigned normalized block-compressed (BC1 compression) format */ + CU_AD_FORMAT_BC1_UNORM_SRGB = 0x92, /**< 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding*/ + CU_AD_FORMAT_BC2_UNORM = 0x93, /**< 4 channel unsigned normalized block-compressed (BC2 compression) format */ + CU_AD_FORMAT_BC2_UNORM_SRGB = 0x94, /**< 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding*/ + CU_AD_FORMAT_BC3_UNORM = 0x95, /**< 4 channel unsigned normalized block-compressed (BC3 compression) format */ + CU_AD_FORMAT_BC3_UNORM_SRGB = 0x96, /**< 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding*/ + CU_AD_FORMAT_BC4_UNORM = 0x97, /**< 1 channel unsigned normalized block-compressed (BC4 compression) format */ + CU_AD_FORMAT_BC4_SNORM = 0x98, /**< 1 channel signed normalized block-compressed (BC4 compression) format */ + CU_AD_FORMAT_BC5_UNORM = 0x99, /**< 2 channel unsigned normalized block-compressed (BC5 compression) format */ + CU_AD_FORMAT_BC5_SNORM = 0x9a, /**< 2 channel signed normalized block-compressed (BC5 compression) format */ + CU_AD_FORMAT_BC6H_UF16 = 0x9b, /**< 3 channel unsigned half-float block-compressed (BC6H compression) format */ + CU_AD_FORMAT_BC6H_SF16 = 0x9c, /**< 3 channel signed half-float block-compressed (BC6H compression) format */ + CU_AD_FORMAT_BC7_UNORM = 0x9d, /**< 4 channel unsigned normalized block-compressed (BC7 compression) format */ + CU_AD_FORMAT_BC7_UNORM_SRGB = 0x9e /**< 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding */ +} CUarray_format; + +/** + * Texture reference addressing modes + */ +typedef enum CUaddress_mode_enum { + CU_TR_ADDRESS_MODE_WRAP = 0, /**< Wrapping address mode */ + CU_TR_ADDRESS_MODE_CLAMP = 1, /**< Clamp to edge address mode */ + CU_TR_ADDRESS_MODE_MIRROR = 2, /**< Mirror address mode */ + CU_TR_ADDRESS_MODE_BORDER = 3 /**< Border address mode */ +} CUaddress_mode; + +/** + * Texture reference filtering modes + */ +typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, /**< Point filter mode */ + CU_TR_FILTER_MODE_LINEAR = 1 /**< Linear filter mode */ +} CUfilter_mode; + +/** + * Device properties + */ +typedef enum CUdevice_attribute_enum { + CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 
1, /**< Maximum number of threads per block */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2, /**< Maximum block dimension X */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3, /**< Maximum block dimension Y */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4, /**< Maximum block dimension Z */ + CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5, /**< Maximum grid dimension X */ + CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6, /**< Maximum grid dimension Y */ + CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7, /**< Maximum grid dimension Z */ + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8, /**< Maximum shared memory available per block in bytes */ + CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = 8, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK */ + CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = 9, /**< Memory available on device for __constant__ variables in a CUDA C kernel in bytes */ + CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10, /**< Warp size in threads */ + CU_DEVICE_ATTRIBUTE_MAX_PITCH = 11, /**< Maximum pitch in bytes allowed by memory copies */ + CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = 12, /**< Maximum number of 32-bit registers available per block */ + CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = 12, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK */ + CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13, /**< Typical clock frequency in kilohertz */ + CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = 14, /**< Alignment requirement for textures */ + CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = 15, /**< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. */ + CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16, /**< Number of multiprocessors on device */ + CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = 17, /**< Specifies whether there is a run time limit on kernels */ + CU_DEVICE_ATTRIBUTE_INTEGRATED = 18, /**< Device is integrated with host memory */ + CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19, /**< Device can map host memory into CUDA address space */ + CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = 20, /**< Compute mode (See ::CUcomputemode for details) */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = 21, /**< Maximum 1D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = 22, /**< Maximum 2D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = 23, /**< Maximum 2D texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = 24, /**< Maximum 3D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = 25, /**< Maximum 3D texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = 26, /**< Maximum 3D texture depth */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = 27, /**< Maximum 2D layered texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = 28, /**< Maximum 2D layered texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = 29, /**< Maximum layers in a 2D layered texture */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = 27, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = 28, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = 29, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS */ + CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = 30, /**< Alignment requirement for surfaces */ + CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = 31, /**< Device can possibly execute multiple kernels concurrently */ + 
CU_DEVICE_ATTRIBUTE_ECC_ENABLED = 32, /**< Device has ECC support enabled */ + CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33, /**< PCI bus ID of the device */ + CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34, /**< PCI device ID of the device */ + CU_DEVICE_ATTRIBUTE_TCC_DRIVER = 35, /**< Device is using TCC driver model */ + CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36, /**< Peak memory clock frequency in kilohertz */ + CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = 37, /**< Global memory bus width in bits */ + CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = 38, /**< Size of L2 cache in bytes */ + CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39, /**< Maximum resident threads per multiprocessor */ + CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40, /**< Number of asynchronous engines */ + CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41, /**< Device shares a unified address space with the host */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = 42, /**< Maximum 1D layered texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = 43, /**< Maximum layers in a 1D layered texture */ + CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = 44, /**< Deprecated, do not use. */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = 45, /**< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = 46, /**< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = 47, /**< Alternate maximum 3D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = 48, /**< Alternate maximum 3D texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = 49, /**< Alternate maximum 3D texture depth */ + CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50, /**< PCI domain ID of the device */ + CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = 51, /**< Pitch alignment requirement for textures */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = 52, /**< Maximum cubemap texture width/height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = 53, /**< Maximum cubemap layered texture width/height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = 54, /**< Maximum layers in a cubemap layered texture */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = 55, /**< Maximum 1D surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = 56, /**< Maximum 2D surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = 57, /**< Maximum 2D surface height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = 58, /**< Maximum 3D surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = 59, /**< Maximum 3D surface height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = 60, /**< Maximum 3D surface depth */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = 61, /**< Maximum 1D layered surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = 62, /**< Maximum layers in a 1D layered surface */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = 63, /**< Maximum 2D layered surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = 64, /**< Maximum 2D layered surface height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = 65, /**< Maximum layers in a 2D layered surface */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = 66, /**< Maximum cubemap surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = 67, /**< Maximum cubemap layered surface width */ + 
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = 68, /**< Maximum layers in a cubemap layered surface */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = 69, /**< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead. */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = 70, /**< Maximum 2D linear texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = 71, /**< Maximum 2D linear texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = 72, /**< Maximum 2D linear texture pitch in bytes */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = 73, /**< Maximum mipmapped 2D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = 74, /**< Maximum mipmapped 2D texture height */ + CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75, /**< Major compute capability version number */ + CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76, /**< Minor compute capability version number */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = 77, /**< Maximum mipmapped 1D texture width */ + CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = 78, /**< Device supports stream priorities */ + CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = 79, /**< Device supports caching globals in L1 */ + CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = 80, /**< Device supports caching locals in L1 */ + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = 81, /**< Maximum shared memory available per multiprocessor in bytes */ + CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = 82, /**< Maximum number of 32-bit registers available per multiprocessor */ + CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = 83, /**< Device can allocate managed memory on this system */ + CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = 84, /**< Device is on a multi-GPU board */ + CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = 85, /**< Unique id for a group of devices on the same multi-GPU board */ + CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = 86, /**< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)*/ + CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = 87, /**< Ratio of single precision performance (in floating-point operations per second) to double precision performance */ + CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = 88, /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */ + CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = 89, /**< Device can coherently access managed memory concurrently with the CPU */ + CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = 90, /**< Device supports compute preemption. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = 91, /**< Device can access host registered memory at the same virtual address as the CPU */ + CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = 92, /**< Deprecated, along with v1 MemOps API, ::cuStreamBatchMemOp and related APIs are supported. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = 93, /**< Deprecated, along with v1 MemOps API, 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = 94, /**< Deprecated, along with v1 MemOps API, ::CU_STREAM_WAIT_VALUE_NOR is supported. 
*/ + CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = 95, /**< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel */ + CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = 96, /**< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated. */ + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = 97, /**< Maximum optin shared memory per block */ + CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = 98, /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details. */ + CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = 99, /**< Device supports host memory registration via ::cudaHostRegister. */ + CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = 100, /**< Device accesses pageable memory via the host's page tables. */ + CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = 101, /**< The host can directly access managed memory on the device without migration. */ + CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = 102, /**< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED*/ + CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = 102, /**< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs */ + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = 103, /**< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = 104, /**< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = 105, /**< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = 106, /**< Maximum number of blocks per multiprocessor */ + CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = 107, /**< Device supports compression of memory */ + CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = 108, /**< Maximum L2 persisting lines capacity setting in bytes. */ + CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = 109, /**< Maximum value of CUaccessPolicyWindow::num_bytes. 
*/ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = 110, /**< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = 111, /**< Shared memory reserved by CUDA driver per block in bytes */ + CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = 112, /**< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays */ + CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = 113, /**< Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU */ + CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = 114, /**< External timeline semaphore interop is supported on the device */ + CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = 115, /**< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs */ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = 116, /**< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) */ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = 117, /**< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum */ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = 118, /**< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. */ + CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = 119, /**< Handle types supported with mempool based IPC */ + CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = 120, /**< Indicates device supports cluster launch */ + CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = 121, /**< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays */ + CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = 122, /**< 64-bit operations are supported in ::cuStreamBatchMemOp and related MemOp APIs. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = 123, /**< ::CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs. */ + CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = 124, /**< Device supports buffer sharing with dma_buf mechanism. */ + CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = 125, /**< Device supports IPC Events. */ + CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = 126, /**< Number of memory domains the device supports. */ + CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = 127, /**< Device supports accessing memory using Tensor Map. */ + CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = 129, /**< Device supports unified function pointers. */ + CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = 132, /**< Device supports switch multicast and reduction operations. 
*/ + CU_DEVICE_ATTRIBUTE_MAX +} CUdevice_attribute; + +/** + * Legacy device properties + */ +typedef struct CUdevprop_st { + int maxThreadsPerBlock; /**< Maximum number of threads per block */ + int maxThreadsDim[3]; /**< Maximum size of each dimension of a block */ + int maxGridSize[3]; /**< Maximum size of each dimension of a grid */ + int sharedMemPerBlock; /**< Shared memory available per block in bytes */ + int totalConstantMemory; /**< Constant memory available on device in bytes */ + int SIMDWidth; /**< Warp size in threads */ + int memPitch; /**< Maximum pitch in bytes allowed by memory copies */ + int regsPerBlock; /**< 32-bit registers available per block */ + int clockRate; /**< Clock frequency in kilohertz */ + int textureAlign; /**< Alignment requirement for textures */ +} CUdevprop_v1; +typedef CUdevprop_v1 CUdevprop; + +/** + * Pointer information + */ +typedef enum CUpointer_attribute_enum { + CU_POINTER_ATTRIBUTE_CONTEXT = 1, /**< The ::CUcontext on which a pointer was allocated or registered */ + CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2, /**< The ::CUmemorytype describing the physical location of a pointer */ + CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3, /**< The address at which a pointer's memory may be accessed on the device */ + CU_POINTER_ATTRIBUTE_HOST_POINTER = 4, /**< The address at which a pointer's memory may be accessed on the host */ + CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5, /**< A pair of tokens for use with the nv-p2p.h Linux kernel interface */ + CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = 6, /**< Synchronize every synchronous memory operation initiated on this region */ + CU_POINTER_ATTRIBUTE_BUFFER_ID = 7, /**< A process-wide unique ID for an allocated memory region*/ + CU_POINTER_ATTRIBUTE_IS_MANAGED = 8, /**< Indicates if the pointer points to managed memory */ + CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = 9, /**< A device ordinal of a device on which a pointer was allocated or registered */ + CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = 10, /**< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise **/ + CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = 11, /**< Starting address for this requested pointer */ + CU_POINTER_ATTRIBUTE_RANGE_SIZE = 12, /**< Size of the address range for this requested pointer */ + CU_POINTER_ATTRIBUTE_MAPPED = 13, /**< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise **/ + CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = 14, /**< Bitmask of allowed ::CUmemAllocationHandleType for this allocation **/ + CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 15, /**< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API **/ + CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = 16, /**< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given */ + CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = 17 /**< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. 
**/ + , + CU_POINTER_ATTRIBUTE_MAPPING_SIZE = 18, /**< Size of the actual underlying mapping that the pointer belongs to **/ + CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = 19, /**< The start address of the mapping that the pointer belongs to **/ + CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = 20 /**< A process-wide unique id corresponding to the physical allocation the pointer belongs to **/ +} CUpointer_attribute; + +/** + * Function properties + */ +typedef enum CUfunction_attribute_enum { + /** + * The maximum number of threads per block, beyond which a launch of the + * function would fail. This number depends on both the function and the + * device on which the function is currently loaded. + */ + CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 0, + + /** + * The size in bytes of statically-allocated shared memory required by + * this function. This does not include dynamically-allocated shared + * memory requested by the user at runtime. + */ + CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = 1, + + /** + * The size in bytes of user-allocated constant memory required by this + * function. + */ + CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = 2, + + /** + * The size in bytes of local memory used by each thread of this function. + */ + CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = 3, + + /** + * The number of registers used by each thread of this function. + */ + CU_FUNC_ATTRIBUTE_NUM_REGS = 4, + + /** + * The PTX virtual architecture version for which the function was + * compiled. This value is the major PTX version * 10 + the minor PTX + * version, so a PTX version 1.3 function would return the value 13. + * Note that this may return the undefined value of 0 for cubins + * compiled prior to CUDA 3.0. + */ + CU_FUNC_ATTRIBUTE_PTX_VERSION = 5, + + /** + * The binary architecture version for which the function was compiled. + * This value is the major binary version * 10 + the minor binary version, + * so a binary version 1.3 function would return the value 13. Note that + * this will return a value of 10 for legacy cubins that do not have a + * properly-encoded binary architecture version. + */ + CU_FUNC_ATTRIBUTE_BINARY_VERSION = 6, + + /** + * The attribute to indicate whether the function has been compiled with + * user specified option "-Xptxas --dlcm=ca" set . + */ + CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = 7, + + /** + * The maximum size in bytes of dynamically-allocated shared memory that can be used by + * this function. If the user-specified dynamic shared memory size is larger than this + * value, the launch will fail. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = 8, + + /** + * On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. + * Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 9, + + /** + * If this attribute is set, the kernel must launch with a valid cluster + * size specified. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = 10, + + /** + * The required cluster width in blocks. The values must either all be 0 or + * all be positive. The validity of the cluster dimensions is otherwise + * checked at launch time. 
+ * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = 11, + + /** + * The required cluster height in blocks. The values must either all be 0 or + * all be positive. The validity of the cluster dimensions is otherwise + * checked at launch time. + * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = 12, + + /** + * The required cluster depth in blocks. The values must either all be 0 or + * all be positive. The validity of the cluster dimensions is otherwise + * checked at launch time. + * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = 13, + + /** + * Whether the function can be launched with non-portable cluster size. 1 is + * allowed, 0 is disallowed. A non-portable cluster size may only function + * on the specific SKUs the program is tested on. The launch might fail if + * the program is run on a different hardware platform. + * + * CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking + * whether the desired size can be launched on the current device. + * + * Portable Cluster Size + * + * A portable cluster size is guaranteed to be functional on all compute + * capabilities higher than the target compute capability. The portable + * cluster size for sm_90 is 8 blocks per cluster. This value may increase + * for future compute capabilities. + * + * The specific hardware unit may support higher cluster sizes that’s not + * guaranteed to be portable. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = 14, + + /** + * The block scheduling policy of a function. The value type is + * CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 15, + + CU_FUNC_ATTRIBUTE_MAX +} CUfunction_attribute; + +/** + * Function cache configurations + */ +typedef enum CUfunc_cache_enum { + CU_FUNC_CACHE_PREFER_NONE = 0x00, /**< no preference for shared memory or L1 (default) */ + CU_FUNC_CACHE_PREFER_SHARED = 0x01, /**< prefer larger shared memory and smaller L1 cache */ + CU_FUNC_CACHE_PREFER_L1 = 0x02, /**< prefer larger L1 cache and smaller shared memory */ + CU_FUNC_CACHE_PREFER_EQUAL = 0x03 /**< prefer equal sized L1 cache and shared memory */ +} CUfunc_cache; + +/** + * Shared memory configurations + */ +typedef enum CUsharedconfig_enum { + CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0x00, /**< set default shared memory bank size */ + CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 0x01, /**< set shared memory bank width to four bytes */ + CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 0x02 /**< set shared memory bank width to eight bytes */ +} CUsharedconfig; + +/** + * Shared memory carveout configurations. 
These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute + */ +typedef enum CUshared_carveout_enum { + CU_SHAREDMEM_CARVEOUT_DEFAULT = -1, /**< No preference for shared memory or L1 (default) */ + CU_SHAREDMEM_CARVEOUT_MAX_SHARED = 100, /**< Prefer maximum available shared memory, minimum L1 cache */ + CU_SHAREDMEM_CARVEOUT_MAX_L1 = 0 /**< Prefer maximum available L1 cache, minimum shared memory */ +} CUshared_carveout; + +/** + * Memory types + */ +typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, /**< Host memory */ + CU_MEMORYTYPE_DEVICE = 0x02, /**< Device memory */ + CU_MEMORYTYPE_ARRAY = 0x03, /**< Array memory */ + CU_MEMORYTYPE_UNIFIED = 0x04 /**< Unified device or host memory */ +} CUmemorytype; + +/** + * Compute Modes + */ +typedef enum CUcomputemode_enum { + CU_COMPUTEMODE_DEFAULT = 0, /**< Default compute mode (Multiple contexts allowed per device) */ + CU_COMPUTEMODE_PROHIBITED = 2, /**< Compute-prohibited mode (No contexts can be created on this device at this time) */ + CU_COMPUTEMODE_EXCLUSIVE_PROCESS = 3 /**< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) */ +} CUcomputemode; + +/** + * Memory advise values + */ +typedef enum CUmem_advise_enum { + CU_MEM_ADVISE_SET_READ_MOSTLY = 1, /**< Data will mostly be read and only occasionally be written to */ + CU_MEM_ADVISE_UNSET_READ_MOSTLY = 2, /**< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY */ + CU_MEM_ADVISE_SET_PREFERRED_LOCATION = 3, /**< Set the preferred location for the data as the specified device */ + CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = 4, /**< Clear the preferred location for the data */ + CU_MEM_ADVISE_SET_ACCESSED_BY = 5, /**< Data will be accessed by the specified device, so prevent page faults as much as possible */ + CU_MEM_ADVISE_UNSET_ACCESSED_BY = 6 /**< Let the Unified Memory subsystem decide on the page faulting policy for the specified device */ +} CUmem_advise; + +typedef enum CUmem_range_attribute_enum { + CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = 1, /**< Whether the range will mostly be read and only occasionally be written to */ + CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = 2, /**< The preferred location of the range */ + CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = 3, /**< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device */ + CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = 4 /**< The last location to which the range was prefetched */ +} CUmem_range_attribute; + +/** + * Online compiler and linker options + */ +typedef enum CUjit_option_enum +{ + /** + * Max number of registers that a thread may use.\n + * Option type: unsigned int\n + * Applies to: compiler only + */ + CU_JIT_MAX_REGISTERS = 0, + + /** + * IN: Specifies minimum number of threads per block to target compilation + * for\n + * OUT: Returns the number of threads the compiler actually targeted. + * This restricts the resource utilization of the compiler (e.g. max + * registers) such that a block with the given number of threads should be + * able to launch based on register limitations. 
Note, this option does not + * currently take into account any other resource limitations, such as + * shared memory utilization.\n + * Cannot be combined with ::CU_JIT_TARGET.\n + * Option type: unsigned int\n + * Applies to: compiler only + */ + CU_JIT_THREADS_PER_BLOCK = 1, + + /** + * Overwrites the option value with the total wall clock time, in + * milliseconds, spent in the compiler and linker\n + * Option type: float\n + * Applies to: compiler and linker + */ + CU_JIT_WALL_TIME = 2, + + /** + * Pointer to a buffer in which to print any log messages + * that are informational in nature (the buffer size is specified via + * option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n + * Option type: char *\n + * Applies to: compiler and linker + */ + CU_JIT_INFO_LOG_BUFFER = 3, + + /** + * IN: Log buffer size in bytes. Log messages will be capped at this size + * (including null terminator)\n + * OUT: Amount of log buffer filled with messages\n + * Option type: unsigned int\n + * Applies to: compiler and linker + */ + CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = 4, + + /** + * Pointer to a buffer in which to print any log messages that + * reflect errors (the buffer size is specified via option + * ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n + * Option type: char *\n + * Applies to: compiler and linker + */ + CU_JIT_ERROR_LOG_BUFFER = 5, + + /** + * IN: Log buffer size in bytes. Log messages will be capped at this size + * (including null terminator)\n + * OUT: Amount of log buffer filled with messages\n + * Option type: unsigned int\n + * Applies to: compiler and linker + */ + CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = 6, + + /** + * Level of optimizations to apply to generated code (0 - 4), with 4 + * being the default and highest level of optimizations.\n + * Option type: unsigned int\n + * Applies to: compiler only + */ + CU_JIT_OPTIMIZATION_LEVEL = 7, + + /** + * No option value required. Determines the target based on the current + * attached context (default)\n + * Option type: No option value needed\n + * Applies to: compiler and linker + */ + CU_JIT_TARGET_FROM_CUCONTEXT = 8, + + /** + * Target is chosen based on supplied ::CUjit_target. Cannot be + * combined with ::CU_JIT_THREADS_PER_BLOCK.\n + * Option type: unsigned int for enumerated type ::CUjit_target\n + * Applies to: compiler and linker + */ + CU_JIT_TARGET = 9, + + /** + * Specifies choice of fallback strategy if matching cubin is not found. + * Choice is based on supplied ::CUjit_fallback. 
This option cannot be + * used with cuLink* APIs as the linker requires exact matches.\n + * Option type: unsigned int for enumerated type ::CUjit_fallback\n + * Applies to: compiler only + */ + CU_JIT_FALLBACK_STRATEGY = 10, + + /** + * Specifies whether to create debug information in output (-g) + * (0: false, default)\n + * Option type: int\n + * Applies to: compiler and linker + */ + CU_JIT_GENERATE_DEBUG_INFO = 11, + + /** + * Generate verbose log messages (0: false, default)\n + * Option type: int\n + * Applies to: compiler and linker + */ + CU_JIT_LOG_VERBOSE = 12, + + /** + * Generate line number information (-lineinfo) (0: false, default)\n + * Option type: int\n + * Applies to: compiler only + */ + CU_JIT_GENERATE_LINE_INFO = 13, + + /** + * Specifies whether to enable caching explicitly (-dlcm) \n + * Choice is based on supplied ::CUjit_cacheMode_enum.\n + * Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n + * Applies to: compiler only + */ + CU_JIT_CACHE_MODE = 14, + + /** + * \deprecated + * This jit option is deprecated and should not be used. + */ + CU_JIT_NEW_SM3X_OPT = 15, + + /** + * This jit option is used for internal purpose only. + */ + CU_JIT_FAST_COMPILE = 16, + + /** + * Array of device symbol names that will be relocated to the corresponding + * host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n + * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n + * When loading a device module, driver will relocate all encountered + * unresolved symbols to the host addresses.\n + * It is only allowed to register symbols that correspond to unresolved + * global variables.\n + * It is illegal to register the same device symbol at multiple addresses.\n + * Option type: const char **\n + * Applies to: dynamic linker only + */ + CU_JIT_GLOBAL_SYMBOL_NAMES = 17, + + /** + * Array of host addresses that will be used to relocate corresponding + * device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n + * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n + * Option type: void **\n + * Applies to: dynamic linker only + */ + CU_JIT_GLOBAL_SYMBOL_ADDRESSES = 18, + + /** + * Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and + * ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n + * Option type: unsigned int\n + * Applies to: dynamic linker only + */ + CU_JIT_GLOBAL_SYMBOL_COUNT = 19, + + /** + * \deprecated + * Enable link-time optimization (-dlto) for device code (Disabled by default).\n + * This option is not supported on 32-bit platforms.\n + * Option type: int\n + * Applies to: compiler and linker + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_LTO = 20, + + /** + * \deprecated + * Control single-precision denormals (-ftz) support (0: false, default). + * 1 : flushes denormal values to zero + * 0 : preserves denormal values + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_FTZ = 21, + + /** + * \deprecated + * Control single-precision floating-point division and reciprocals + * (-prec-div) support (1: true, default). 
+ * 1 : Enables the IEEE round-to-nearest mode + * 0 : Enables the fast approximation mode + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_PREC_DIV = 22, + + /** + * \deprecated + * Control single-precision floating-point square root + * (-prec-sqrt) support (1: true, default). + * 1 : Enables the IEEE round-to-nearest mode + * 0 : Enables the fast approximation mode + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_PREC_SQRT = 23, + + /** + * \deprecated + * Enable/Disable the contraction of floating-point multiplies + * and adds/subtracts into floating-point multiply-add (-fma) + * operations (1: Enable, default; 0: Disable). + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_FMA = 24, + + /** + * \deprecated + * Array of kernel names that should be preserved at link time while others + * can be removed.\n + * Must contain ::CU_JIT_REFERENCED_KERNEL_COUNT entries.\n + * Note that kernel names can be mangled by the compiler in which case the + * mangled name needs to be specified.\n + * Wildcard "*" can be used to represent zero or more characters instead of + * specifying the full or mangled name.\n + * It is important to note that the wildcard "*" is also added implicitly. + * For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and + * thus preserve all kernels with those names. This can be avoided by providing + * a more specific name like "barfoobaz".\n + * Option type: const char **\n + * Applies to: dynamic linker only + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_KERNEL_NAMES = 25, + + /** + * \deprecated + * Number of entries in ::CU_JIT_REFERENCED_KERNEL_NAMES array.\n + * Option type: unsigned int\n + * Applies to: dynamic linker only + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_KERNEL_COUNT = 26, + + /** + * \deprecated + * Array of variable names (__device__ and/or __constant__) that should be + * preserved at link time while others can be removed.\n + * Must contain ::CU_JIT_REFERENCED_VARIABLE_COUNT entries.\n + * Note that variable names can be mangled by the compiler in which case the + * mangled name needs to be specified.\n + * Wildcard "*" can be used to represent zero or more characters instead of + * specifying the full or mangled name.\n + * It is important to note that the wildcard "*" is also added implicitly. + * For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and + * thus preserve all variables with those names. 
This can be avoided by providing + * a more specific name like "barfoobaz".\n + * Option type: const char **\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_VARIABLE_NAMES = 27, + + /** + * \deprecated + * Number of entries in ::CU_JIT_REFERENCED_VARIABLE_NAMES array.\n + * Option type: unsigned int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_VARIABLE_COUNT = 28, + + /** + * \deprecated + * This option serves as a hint to enable the JIT compiler/linker + * to remove constant (__constant__) and device (__device__) variables + * unreferenced in device code (Disabled by default).\n + * Note that host references to constant and device variables using APIs like + * ::cuModuleGetGlobal() with this option specified may result in undefined behavior unless + * the variables are explicitly specified using ::CU_JIT_REFERENCED_VARIABLE_NAMES.\n + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = 29, + + /** + * Generate position independent code (0: false)\n + * Option type: int\n + * Applies to: compiler only + */ + CU_JIT_POSITION_INDEPENDENT_CODE = 30, + + CU_JIT_NUM_OPTIONS + +} CUjit_option; + +/* + * Indicates that compute device class supports accelerated features. + */ +#define CU_COMPUTE_ACCELERATED_TARGET_BASE 0x10000 + +/** + * Online compilation targets + */ +typedef enum CUjit_target_enum +{ + CU_TARGET_COMPUTE_30 = 30, /**< Compute device class 3.0 */ + CU_TARGET_COMPUTE_32 = 32, /**< Compute device class 3.2 */ + CU_TARGET_COMPUTE_35 = 35, /**< Compute device class 3.5 */ + CU_TARGET_COMPUTE_37 = 37, /**< Compute device class 3.7 */ + CU_TARGET_COMPUTE_50 = 50, /**< Compute device class 5.0 */ + CU_TARGET_COMPUTE_52 = 52, /**< Compute device class 5.2 */ + CU_TARGET_COMPUTE_53 = 53, /**< Compute device class 5.3 */ + CU_TARGET_COMPUTE_60 = 60, /**< Compute device class 6.0.*/ + CU_TARGET_COMPUTE_61 = 61, /**< Compute device class 6.1.*/ + CU_TARGET_COMPUTE_62 = 62, /**< Compute device class 6.2.*/ + CU_TARGET_COMPUTE_70 = 70, /**< Compute device class 7.0.*/ + CU_TARGET_COMPUTE_72 = 72, /**< Compute device class 7.2.*/ + CU_TARGET_COMPUTE_75 = 75, /**< Compute device class 7.5.*/ + CU_TARGET_COMPUTE_80 = 80, /**< Compute device class 8.0.*/ + CU_TARGET_COMPUTE_86 = 86, /**< Compute device class 8.6.*/ + CU_TARGET_COMPUTE_87 = 87, /**< Compute device class 8.7.*/ + CU_TARGET_COMPUTE_89 = 89, /**< Compute device class 8.9.*/ + CU_TARGET_COMPUTE_90 = 90, /**< Compute device class 9.0.*/ + + /**< Compute device class 9.0. 
with accelerated features.*/ + CU_TARGET_COMPUTE_90A = CU_COMPUTE_ACCELERATED_TARGET_BASE + CU_TARGET_COMPUTE_90 +} CUjit_target; + +/** + * Cubin matching fallback strategies + */ +typedef enum CUjit_fallback_enum +{ + CU_PREFER_PTX = 0, /**< Prefer to compile ptx if exact binary match not found */ + + CU_PREFER_BINARY /**< Prefer to fall back to compatible binary code if exact match not found */ + +} CUjit_fallback; + +/** + * Caching modes for dlcm + */ +typedef enum CUjit_cacheMode_enum +{ + CU_JIT_CACHE_OPTION_NONE = 0, /**< Compile with no -dlcm flag specified */ + CU_JIT_CACHE_OPTION_CG, /**< Compile with L1 cache disabled */ + CU_JIT_CACHE_OPTION_CA /**< Compile with L1 cache enabled */ +} CUjit_cacheMode; + +/** + * Device code formats + */ +typedef enum CUjitInputType_enum +{ + /** + * Compiled device-class-specific device code\n + * Applicable options: none + */ + CU_JIT_INPUT_CUBIN = 0, + + /** + * PTX source code\n + * Applicable options: PTX compiler options + */ + CU_JIT_INPUT_PTX = 1, + + /** + * Bundle of multiple cubins and/or PTX of some device code\n + * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY + */ + CU_JIT_INPUT_FATBINARY = 2, + + /** + * Host object with embedded device code\n + * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY + */ + CU_JIT_INPUT_OBJECT = 3, + + /** + * Archive of host objects with embedded device code\n + * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY + */ + CU_JIT_INPUT_LIBRARY = 4, + + /** + * \deprecated + * High-level intermediate code for link-time optimization\n + * Applicable options: NVVM compiler options, PTX compiler options + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_INPUT_NVVM = 5, + + CU_JIT_NUM_INPUT_TYPES = 6 +} CUjitInputType; + +typedef struct CUlinkState_st *CUlinkState; + +/** + * Flags to register a graphics resource + */ +typedef enum CUgraphicsRegisterFlags_enum { + CU_GRAPHICS_REGISTER_FLAGS_NONE = 0x00, + CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = 0x01, + CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 0x02, + CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 0x04, + CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 0x08 +} CUgraphicsRegisterFlags; + +/** + * Flags for mapping and unmapping interop resources + */ +typedef enum CUgraphicsMapResourceFlags_enum { + CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0x00, + CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01, + CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02 +} CUgraphicsMapResourceFlags; + +/** + * Array indices for cube faces + */ +typedef enum CUarray_cubemap_face_enum { + CU_CUBEMAP_FACE_POSITIVE_X = 0x00, /**< Positive X face of cubemap */ + CU_CUBEMAP_FACE_NEGATIVE_X = 0x01, /**< Negative X face of cubemap */ + CU_CUBEMAP_FACE_POSITIVE_Y = 0x02, /**< Positive Y face of cubemap */ + CU_CUBEMAP_FACE_NEGATIVE_Y = 0x03, /**< Negative Y face of cubemap */ + CU_CUBEMAP_FACE_POSITIVE_Z = 0x04, /**< Positive Z face of cubemap */ + CU_CUBEMAP_FACE_NEGATIVE_Z = 0x05 /**< Negative Z face of cubemap */ +} CUarray_cubemap_face; + +/** + * Limits + */ +typedef enum CUlimit_enum { + CU_LIMIT_STACK_SIZE = 0x00, /**< GPU thread stack size */ + CU_LIMIT_PRINTF_FIFO_SIZE = 0x01, /**< GPU printf FIFO size */ + CU_LIMIT_MALLOC_HEAP_SIZE = 0x02, /**< GPU malloc heap size */ + CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 0x03, /**< GPU device runtime launch synchronize depth */ + CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 0x04, /**< GPU device runtime pending launch count */ + CU_LIMIT_MAX_L2_FETCH_GRANULARITY 
= 0x05, /**< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint */ + CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 0x06, /**< A size in bytes for L2 persisting lines cache size */ + CU_LIMIT_MAX +} CUlimit; + +/** + * Resource types + */ +typedef enum CUresourcetype_enum { + CU_RESOURCE_TYPE_ARRAY = 0x00, /**< Array resource */ + CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, /**< Mipmapped array resource */ + CU_RESOURCE_TYPE_LINEAR = 0x02, /**< Linear resource */ + CU_RESOURCE_TYPE_PITCH2D = 0x03 /**< Pitch 2D resource */ +} CUresourcetype; + +#ifdef _WIN32 +#define CUDA_CB __stdcall +#else +#define CUDA_CB +#endif + +/** + * CUDA host function + * \param userData Argument value passed to the function + */ +typedef void (CUDA_CB *CUhostFn)(void *userData); + +/** + * Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members. + */ +typedef enum CUaccessProperty_enum { + CU_ACCESS_PROPERTY_NORMAL = 0, /**< Normal cache persistence. */ + CU_ACCESS_PROPERTY_STREAMING = 1, /**< Streaming access is less likely to persist in cache. */ + CU_ACCESS_PROPERTY_PERSISTING = 2 /**< Persisting access is more likely to persist in cache.*/ +} CUaccessProperty; + +/** + * Specifies an access policy for a window, a contiguous extent of memory + * beginning at base_ptr and ending at base_ptr + num_bytes. + * num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. + * Partition into many segments and assign segments such that: + * sum of "hit segments" / window == approx. ratio. + * sum of "miss segments" / window == approx 1-ratio. + * Segments and ratio specifications are fitted to the capabilities of + * the architecture. + * Accesses in a hit segment apply the hitProp access policy. + * Accesses in a miss segment apply the missProp access policy. + */ +typedef struct CUaccessPolicyWindow_st { + void *base_ptr; /**< Starting address of the access policy window. CUDA driver may align it. */ + size_t num_bytes; /**< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment. */ + float hitRatio; /**< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp. */ + CUaccessProperty hitProp; /**< ::CUaccessProperty set for hit. */ + CUaccessProperty missProp; /**< ::CUaccessProperty set for miss. 
Must be either NORMAL or STREAMING */ +} CUaccessPolicyWindow_v1; +/** + * Access policy window + */ +typedef CUaccessPolicyWindow_v1 CUaccessPolicyWindow; + +/** + * GPU kernel node parameters + */ +typedef struct CUDA_KERNEL_NODE_PARAMS_st { + CUfunction func; /**< Kernel to launch */ + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + void **kernelParams; /**< Array of pointers to kernel parameters */ + void **extra; /**< Extra options */ +} CUDA_KERNEL_NODE_PARAMS_v1; + +/** + * GPU kernel node parameters + */ +typedef struct CUDA_KERNEL_NODE_PARAMS_v2_st { + CUfunction func; /**< Kernel to launch */ + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + void **kernelParams; /**< Array of pointers to kernel parameters */ + void **extra; /**< Extra options */ + CUkernel kern; /**< Kernel to launch, will only be referenced if func is NULL */ + CUcontext ctx; /**< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set. */ +} CUDA_KERNEL_NODE_PARAMS_v2; +typedef CUDA_KERNEL_NODE_PARAMS_v2 CUDA_KERNEL_NODE_PARAMS; + +/** + * Memset node parameters + */ +typedef struct CUDA_MEMSET_NODE_PARAMS_st { + CUdeviceptr dst; /**< Destination device pointer */ + size_t pitch; /**< Pitch of destination device pointer. Unused if height is 1 */ + unsigned int value; /**< Value to be set */ + unsigned int elementSize; /**< Size of each element in bytes. Must be 1, 2, or 4. 
*/ + size_t width; /**< Width of the row in elements */ + size_t height; /**< Number of rows */ +} CUDA_MEMSET_NODE_PARAMS_v1; +typedef CUDA_MEMSET_NODE_PARAMS_v1 CUDA_MEMSET_NODE_PARAMS; + +/** + * Host node parameters + */ +typedef struct CUDA_HOST_NODE_PARAMS_st { + CUhostFn fn; /**< The function to call when the node executes */ + void* userData; /**< Argument to pass to the function */ +} CUDA_HOST_NODE_PARAMS_v1; +typedef CUDA_HOST_NODE_PARAMS_v1 CUDA_HOST_NODE_PARAMS; + +/** + * Graph node types + */ +typedef enum CUgraphNodeType_enum { + CU_GRAPH_NODE_TYPE_KERNEL = 0, /**< GPU kernel node */ + CU_GRAPH_NODE_TYPE_MEMCPY = 1, /**< Memcpy node */ + CU_GRAPH_NODE_TYPE_MEMSET = 2, /**< Memset node */ + CU_GRAPH_NODE_TYPE_HOST = 3, /**< Host (executable) node */ + CU_GRAPH_NODE_TYPE_GRAPH = 4, /**< Node which executes an embedded graph */ + CU_GRAPH_NODE_TYPE_EMPTY = 5, /**< Empty (no-op) node */ + CU_GRAPH_NODE_TYPE_WAIT_EVENT = 6, /**< External event wait node */ + CU_GRAPH_NODE_TYPE_EVENT_RECORD = 7, /**< External event record node */ + CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = 8, /**< External semaphore signal node */ + CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = 9, /**< External semaphore wait node */ + CU_GRAPH_NODE_TYPE_MEM_ALLOC = 10,/**< Memory Allocation Node */ + CU_GRAPH_NODE_TYPE_MEM_FREE = 11,/**< Memory Free Node */ + CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = 12 /**< Batch MemOp Node */ +} CUgraphNodeType; + +/** + * Graph instantiation results +*/ +typedef enum CUgraphInstantiateResult_enum +{ + CUDA_GRAPH_INSTANTIATE_SUCCESS = 0, /**< Instantiation succeeded */ + CUDA_GRAPH_INSTANTIATE_ERROR = 1, /**< Instantiation failed for an unexpected reason which is described in the return value of the function */ + CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = 2, /**< Instantiation failed due to invalid structure, such as cycles */ + CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = 3, /**< Instantiation for device launch failed because the graph contained an unsupported operation */ + CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = 4 /**< Instantiation for device launch failed due to the nodes belonging to different contexts */ +} CUgraphInstantiateResult; + +/** + * Graph instantiation parameters + */ +typedef struct CUDA_GRAPH_INSTANTIATE_PARAMS_st +{ + cuuint64_t flags; /**< Instantiation flags */ + CUstream hUploadStream; /**< Upload stream */ + CUgraphNode hErrNode_out; /**< The node which caused instantiation to fail, if any */ + CUgraphInstantiateResult result_out; /**< Whether instantiation was successful. If it failed, the reason why */ +} CUDA_GRAPH_INSTANTIATE_PARAMS; + +typedef enum CUsynchronizationPolicy_enum { + CU_SYNC_POLICY_AUTO = 1, + CU_SYNC_POLICY_SPIN = 2, + CU_SYNC_POLICY_YIELD = 3, + CU_SYNC_POLICY_BLOCKING_SYNC = 4 +} CUsynchronizationPolicy; + +/** + * Cluster scheduling policies. 
These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute + */ +typedef enum CUclusterSchedulingPolicy_enum { + CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = 0, /**< the default policy */ + CU_CLUSTER_SCHEDULING_POLICY_SPREAD = 1, /**< spread the blocks within a cluster to the SMs */ + CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = 2 /**< allow the hardware to load-balance the blocks in a cluster to the SMs */ +} CUclusterSchedulingPolicy; + +typedef enum CUlaunchMemSyncDomain_enum { + CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = 0, + CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = 1 +} CUlaunchMemSyncDomain; + +typedef struct CUlaunchMemSyncDomainMap_st { + unsigned char default_; + unsigned char remote; +} CUlaunchMemSyncDomainMap; + +typedef enum CUlaunchAttributeID_enum { + CU_LAUNCH_ATTRIBUTE_IGNORE = 0 /**< Ignored entry, for convenient composition */ + , CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1 /**< Valid for streams, graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2 /**< Valid for graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3 /**< Valid for streams. */ + , CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4 /**< Valid for graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5 /**< Valid for graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6 /**< Valid for launches. Setting + programmaticStreamSerializationAllowed to non-0 + signals that the kernel will use programmatic + means to resolve its stream dependency, so that + the CUDA runtime should opportunistically allow + the grid's execution to overlap with the previous + kernel in the stream, if that kernel requests the + overlap. The dependent launches can choose to wait + on the dependency using the programmatic sync + (cudaGridDependencySynchronize() or equivalent PTX + instructions). */ + , CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7 /**< Valid for launches. Event recorded through this + launch attribute is guaranteed to only trigger + after all block in the associated kernel trigger + the event. A block can trigger the event through + PTX launchdep.release or CUDA builtin function + cudaTriggerProgrammaticLaunchCompletion(). A + trigger can also be inserted at the beginning of + each block's execution if triggerAtBlockStart is + set to non-0. The dependent launches can choose to + wait on the dependency using the programmatic sync + (cudaGridDependencySynchronize() or equivalent PTX + instructions). Note that dependents (including the + CPU thread calling cuEventSynchronize()) are not + guaranteed to observe the release precisely when + it is released. For example, cuEventSynchronize() + may only observe the event trigger long after the + associated kernel has completed. This recording + type is primarily meant for establishing + programmatic dependency between device tasks. The + event supplied must not be an interprocess or + interop event. The event must disable timing (i.e. + created with ::CU_EVENT_DISABLE_TIMING flag set). + */ + , CU_LAUNCH_ATTRIBUTE_PRIORITY = 8 /**< Valid for streams, graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9 + , CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = 10 +#ifdef __CUDA_API_VERSION_INTERNAL + , CU_LAUNCH_ATTRIBUTE_MAX +#endif +} CUlaunchAttributeID; + +typedef union CUlaunchAttributeValue_union { + char pad[64]; /**< Pad to 64 bytes */ + CUaccessPolicyWindow accessPolicyWindow; /**< Attribute ::CUaccessPolicyWindow. 
*/ + int cooperative; /**< Nonzero indicates a cooperative kernel (see ::cuLaunchCooperativeKernel). */ + CUsynchronizationPolicy syncPolicy; /**< ::CUsynchronizationPolicy for work queued up in this stream */ + struct { + unsigned int x; + unsigned int y; + unsigned int z; + } clusterDim; /**< Cluster dimensions for the kernel node. */ + CUclusterSchedulingPolicy clusterSchedulingPolicyPreference; /**< Cluster scheduling policy preference for the kernel node. */ + int programmaticStreamSerializationAllowed; + struct { + CUevent event; + int flags; /* Does not accept ::CU_EVENT_RECORD_EXTERNAL */ + int triggerAtBlockStart; + } programmaticEvent; + int priority; /**< Execution priority of the kernel. */ + CUlaunchMemSyncDomainMap memSyncDomainMap; + CUlaunchMemSyncDomain memSyncDomain; +} CUlaunchAttributeValue; + +typedef struct CUlaunchAttribute_st { + CUlaunchAttributeID id; + char pad[8 - sizeof(CUlaunchAttributeID)]; + CUlaunchAttributeValue value; +} CUlaunchAttribute; + +typedef struct CUlaunchConfig_st { + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + CUstream hStream; /**< Stream identifier */ + CUlaunchAttribute *attrs; /**< nullable if numAttrs == 0 */ + unsigned int numAttrs; /**< number of attributes populated in attrs */ +} CUlaunchConfig; + +typedef CUlaunchAttributeID CUkernelNodeAttrID; +#define CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW +#define CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE CU_LAUNCH_ATTRIBUTE_COOPERATIVE +#define CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION +#define CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE +#define CU_KERNEL_NODE_ATTRIBUTE_PRIORITY CU_LAUNCH_ATTRIBUTE_PRIORITY +#define CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +#define CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN + +typedef CUlaunchAttributeValue CUkernelNodeAttrValue_v1; +typedef CUkernelNodeAttrValue_v1 CUkernelNodeAttrValue; + +/** + * Possible stream capture statuses returned by ::cuStreamIsCapturing + */ +typedef enum CUstreamCaptureStatus_enum { + CU_STREAM_CAPTURE_STATUS_NONE = 0, /**< Stream is not capturing */ + CU_STREAM_CAPTURE_STATUS_ACTIVE = 1, /**< Stream is actively capturing */ + CU_STREAM_CAPTURE_STATUS_INVALIDATED = 2 /**< Stream is part of a capture sequence that + has been invalidated, but not terminated */ +} CUstreamCaptureStatus; + +/** + * Possible modes for stream capture thread interactions. 
For more details see + * ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode + */ +typedef enum CUstreamCaptureMode_enum { + CU_STREAM_CAPTURE_MODE_GLOBAL = 0, + CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = 1, + CU_STREAM_CAPTURE_MODE_RELAXED = 2 +} CUstreamCaptureMode; + +typedef CUlaunchAttributeID CUstreamAttrID; +#define CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW +#define CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY +#define CU_STREAM_ATTRIBUTE_PRIORITY CU_LAUNCH_ATTRIBUTE_PRIORITY +#define CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +#define CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN + +typedef CUlaunchAttributeValue CUstreamAttrValue_v1; +typedef CUstreamAttrValue_v1 CUstreamAttrValue; + +/** + * Flags to specify search options. For more details see ::cuGetProcAddress + */ +typedef enum CUdriverProcAddress_flags_enum { + CU_GET_PROC_ADDRESS_DEFAULT = 0, /**< Default search mode for driver symbols. */ + CU_GET_PROC_ADDRESS_LEGACY_STREAM = 1 << 0, /**< Search for legacy versions of driver symbols. */ + CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = 1 << 1 /**< Search for per-thread versions of driver symbols. */ +} CUdriverProcAddress_flags; + +/** + * Flags to indicate search status. For more details see ::cuGetProcAddress + */ +typedef enum CUdriverProcAddressQueryResult_enum { + CU_GET_PROC_ADDRESS_SUCCESS = 0, /**< Symbol was successfully found */ + CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = 1, /**< Symbol was not found in search */ + CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = 2 /**< Symbol was found but version supplied was not sufficient */ +} CUdriverProcAddressQueryResult; + +/** + * Execution Affinity Types + */ +typedef enum CUexecAffinityType_enum { + CU_EXEC_AFFINITY_TYPE_SM_COUNT = 0, /**< Create a context with limited SMs. */ + CU_EXEC_AFFINITY_TYPE_MAX +} CUexecAffinityType; + +/** + * Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT + */ +typedef struct CUexecAffinitySmCount_st { + unsigned int val; /**< The number of SMs the context is limited to use. */ +} CUexecAffinitySmCount_v1; +typedef CUexecAffinitySmCount_v1 CUexecAffinitySmCount; + +/** + * Execution Affinity Parameters + */ +typedef struct CUexecAffinityParam_st { + CUexecAffinityType type; + union { + CUexecAffinitySmCount smCount; /** Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT */ + } param; +} CUexecAffinityParam_v1; +/** + * Execution Affinity Parameters + */ +typedef CUexecAffinityParam_v1 CUexecAffinityParam; + +/** + * Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile() + */ +typedef enum CUlibraryOption_enum +{ + CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = 0, + + /** + * Specifies that the argument \p code passed to ::cuLibraryLoadData() will be preserved. + * Specifying this option will let the driver know that \p code can be accessed at any point + * until ::cuLibraryUnload(). The default behavior is for the driver to allocate and + * maintain its own copy of \p code. Note that this is only a memory usage optimization + * hint and the driver can choose to ignore it if required. + * Specifying this option with ::cuLibraryLoadFromFile() is invalid and + * will return ::CUDA_ERROR_INVALID_VALUE. 
+ */ + CU_LIBRARY_BINARY_IS_PRESERVED = 1, + + CU_LIBRARY_NUM_OPTIONS +} CUlibraryOption; + +typedef struct CUlibraryHostUniversalFunctionAndDataTable_st +{ + void *functionTable; + size_t functionWindowSize; + void *dataTable; + size_t dataWindowSize; +} CUlibraryHostUniversalFunctionAndDataTable; + +/** + * Error codes + */ +typedef enum cudaError_enum { + /** + * The API call returned with no errors. In the case of query calls, this + * also means that the operation being queried is complete (see + * ::cuEventQuery() and ::cuStreamQuery()). + */ + CUDA_SUCCESS = 0, + + /** + * This indicates that one or more of the parameters passed to the API call + * is not within an acceptable range of values. + */ + CUDA_ERROR_INVALID_VALUE = 1, + + /** + * The API call failed because it was unable to allocate enough memory to + * perform the requested operation. + */ + CUDA_ERROR_OUT_OF_MEMORY = 2, + + /** + * This indicates that the CUDA driver has not been initialized with + * ::cuInit() or that initialization has failed. + */ + CUDA_ERROR_NOT_INITIALIZED = 3, + + /** + * This indicates that the CUDA driver is in the process of shutting down. + */ + CUDA_ERROR_DEINITIALIZED = 4, + + /** + * This indicates profiler is not initialized for this run. This can + * happen when the application is running with external profiling tools + * like visual profiler. + */ + CUDA_ERROR_PROFILER_DISABLED = 5, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to attempt to enable/disable the profiling via ::cuProfilerStart or + * ::cuProfilerStop without initialization. + */ + CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to call cuProfilerStart() when profiling is already enabled. + */ + CUDA_ERROR_PROFILER_ALREADY_STARTED = 7, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to call cuProfilerStop() when profiling is already disabled. + */ + CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8, + + /** + * This indicates that the CUDA driver that the application has loaded is a + * stub library. Applications that run with the stub rather than a real + * driver loaded will result in CUDA API returning this error. + */ + CUDA_ERROR_STUB_LIBRARY = 34, + + /** + * This indicates that requested CUDA device is unavailable at the current + * time. Devices are often unavailable due to use of + * ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS or ::CU_COMPUTEMODE_PROHIBITED. + */ + CUDA_ERROR_DEVICE_UNAVAILABLE = 46, + + /** + * This indicates that no CUDA-capable devices were detected by the installed + * CUDA driver. + */ + CUDA_ERROR_NO_DEVICE = 100, + + /** + * This indicates that the device ordinal supplied by the user does not + * correspond to a valid CUDA device or that the action requested is + * invalid for the specified device. + */ + CUDA_ERROR_INVALID_DEVICE = 101, + + /** + * This error indicates that the Grid license is not applied. + */ + CUDA_ERROR_DEVICE_NOT_LICENSED = 102, + + /** + * This indicates that the device kernel image is invalid. This can also + * indicate an invalid CUDA module. + */ + CUDA_ERROR_INVALID_IMAGE = 200, + + /** + * This most frequently indicates that there is no context bound to the + * current thread. This can also be returned if the context passed to an + * API call is not a valid handle (such as a context that has had + * ::cuCtxDestroy() invoked on it). 
This can also be returned if a user + * mixes different API versions (i.e. 3010 context with 3020 API calls). + * See ::cuCtxGetApiVersion() for more details. + */ + CUDA_ERROR_INVALID_CONTEXT = 201, + + /** + * This indicated that the context being supplied as a parameter to the + * API call was already the active context. + * \deprecated + * This error return is deprecated as of CUDA 3.2. It is no longer an + * error to attempt to push the active context via ::cuCtxPushCurrent(). + */ + CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202, + + /** + * This indicates that a map or register operation has failed. + */ + CUDA_ERROR_MAP_FAILED = 205, + + /** + * This indicates that an unmap or unregister operation has failed. + */ + CUDA_ERROR_UNMAP_FAILED = 206, + + /** + * This indicates that the specified array is currently mapped and thus + * cannot be destroyed. + */ + CUDA_ERROR_ARRAY_IS_MAPPED = 207, + + /** + * This indicates that the resource is already mapped. + */ + CUDA_ERROR_ALREADY_MAPPED = 208, + + /** + * This indicates that there is no kernel image available that is suitable + * for the device. This can occur when a user specifies code generation + * options for a particular CUDA source file that do not include the + * corresponding device configuration. + */ + CUDA_ERROR_NO_BINARY_FOR_GPU = 209, + + /** + * This indicates that a resource has already been acquired. + */ + CUDA_ERROR_ALREADY_ACQUIRED = 210, + + /** + * This indicates that a resource is not mapped. + */ + CUDA_ERROR_NOT_MAPPED = 211, + + /** + * This indicates that a mapped resource is not available for access as an + * array. + */ + CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212, + + /** + * This indicates that a mapped resource is not available for access as a + * pointer. + */ + CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213, + + /** + * This indicates that an uncorrectable ECC error was detected during + * execution. + */ + CUDA_ERROR_ECC_UNCORRECTABLE = 214, + + /** + * This indicates that the ::CUlimit passed to the API call is not + * supported by the active device. + */ + CUDA_ERROR_UNSUPPORTED_LIMIT = 215, + + /** + * This indicates that the ::CUcontext passed to the API call can + * only be bound to a single CPU thread at a time but is already + * bound to a CPU thread. + */ + CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216, + + /** + * This indicates that peer access is not supported across the given + * devices. + */ + CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = 217, + + /** + * This indicates that a PTX JIT compilation failed. + */ + CUDA_ERROR_INVALID_PTX = 218, + + /** + * This indicates an error with OpenGL or DirectX context. + */ + CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219, + + /** + * This indicates that an uncorrectable NVLink error was detected during the + * execution. + */ + CUDA_ERROR_NVLINK_UNCORRECTABLE = 220, + + /** + * This indicates that the PTX JIT compiler library was not found. + */ + CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221, + + /** + * This indicates that the provided PTX was compiled with an unsupported toolchain. + */ + + CUDA_ERROR_UNSUPPORTED_PTX_VERSION = 222, + + /** + * This indicates that the PTX JIT compilation was disabled. + */ + CUDA_ERROR_JIT_COMPILATION_DISABLED = 223, + + /** + * This indicates that the ::CUexecAffinityType passed to the API call is not + * supported by the active device. + */ + CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = 224, + + /** + * This indicates that the code to be compiled by the PTX JIT contains + * unsupported call to cudaDeviceSynchronize. 
+ */ + CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = 225, + + /** + * This indicates that the device kernel source is invalid. This includes + * compilation/linker errors encountered in device code or user error. + */ + CUDA_ERROR_INVALID_SOURCE = 300, + + /** + * This indicates that the file specified was not found. + */ + CUDA_ERROR_FILE_NOT_FOUND = 301, + + /** + * This indicates that a link to a shared object failed to resolve. + */ + CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302, + + /** + * This indicates that initialization of a shared object failed. + */ + CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303, + + /** + * This indicates that an OS call failed. + */ + CUDA_ERROR_OPERATING_SYSTEM = 304, + + /** + * This indicates that a resource handle passed to the API call was not + * valid. Resource handles are opaque types like ::CUstream and ::CUevent. + */ + CUDA_ERROR_INVALID_HANDLE = 400, + + /** + * This indicates that a resource required by the API call is not in a + * valid state to perform the requested operation. + */ + CUDA_ERROR_ILLEGAL_STATE = 401, + + /** + * This indicates that a named symbol was not found. Examples of symbols + * are global/constant variable names, driver function names, texture names, + * and surface names. + */ + CUDA_ERROR_NOT_FOUND = 500, + + /** + * This indicates that asynchronous operations issued previously have not + * completed yet. This result is not actually an error, but must be indicated + * differently than ::CUDA_SUCCESS (which indicates completion). Calls that + * may return this value include ::cuEventQuery() and ::cuStreamQuery(). + */ + CUDA_ERROR_NOT_READY = 600, + + /** + * While executing a kernel, the device encountered a + * load or store instruction on an invalid memory address. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_ILLEGAL_ADDRESS = 700, + + /** + * This indicates that a launch did not occur because it did not have + * appropriate resources. This error usually indicates that the user has + * attempted to pass too many arguments to the device kernel, or the + * kernel launch specifies too many threads for the kernel's register + * count. Passing arguments of the wrong size (i.e. a 64-bit pointer + * when a 32-bit int is expected) is equivalent to passing too many + * arguments and can also result in this error. + */ + CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701, + + /** + * This indicates that the device kernel took too long to execute. This can + * only occur if timeouts are enabled - see the device attribute + * ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_LAUNCH_TIMEOUT = 702, + + /** + * This error indicates a kernel launch that uses an incompatible texturing + * mode. + */ + CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703, + + /** + * This error indicates that a call to ::cuCtxEnablePeerAccess() is + * trying to re-enable peer access to a context which has already + * had peer access to it enabled. + */ + CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704, + + /** + * This error indicates that ::cuCtxDisablePeerAccess() is + * trying to disable peer access which has not been enabled yet + * via ::cuCtxEnablePeerAccess(). 
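+ * For illustration, the expected pairing (error checking abbreviated; peerCtx is
+ * assumed to be a context on a peer-capable device) is:
+ * \code
+   int canAccess = 0;
+   cuDeviceCanAccessPeer(&canAccess, dev, peerDev);
+   if (canAccess) {
+       cuCtxEnablePeerAccess(peerCtx, 0);  // flags must be 0
+       // ... work in the current context that reads/writes peerCtx allocations ...
+       cuCtxDisablePeerAccess(peerCtx);    // only valid after a matching enable
+   }
+ * \endcode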
+ */ + CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705, + + /** + * This error indicates that the primary context for the specified device + * has already been initialized. + */ + CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708, + + /** + * This error indicates that the context current to the calling thread + * has been destroyed using ::cuCtxDestroy, or is a primary context which + * has not yet been initialized. + */ + CUDA_ERROR_CONTEXT_IS_DESTROYED = 709, + + /** + * A device-side assert triggered during kernel execution. The context + * cannot be used anymore, and must be destroyed. All existing device + * memory allocations from this context are invalid and must be + * reconstructed if the program is to continue using CUDA. + */ + CUDA_ERROR_ASSERT = 710, + + /** + * This error indicates that the hardware resources required to enable + * peer access have been exhausted for one or more of the devices + * passed to ::cuCtxEnablePeerAccess(). + */ + CUDA_ERROR_TOO_MANY_PEERS = 711, + + /** + * This error indicates that the memory range passed to ::cuMemHostRegister() + * has already been registered. + */ + CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712, + + /** + * This error indicates that the pointer passed to ::cuMemHostUnregister() + * does not correspond to any currently registered memory region. + */ + CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713, + + /** + * While executing a kernel, the device encountered a stack error. + * This can be due to stack corruption or exceeding the stack size limit. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_HARDWARE_STACK_ERROR = 714, + + /** + * While executing a kernel, the device encountered an illegal instruction. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_ILLEGAL_INSTRUCTION = 715, + + /** + * While executing a kernel, the device encountered a load or store instruction + * on a memory address which is not aligned. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_MISALIGNED_ADDRESS = 716, + + /** + * While executing a kernel, the device encountered an instruction + * which can only operate on memory locations in certain address spaces + * (global, shared, or local), but was supplied a memory address not + * belonging to an allowed address space. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_INVALID_ADDRESS_SPACE = 717, + + /** + * While executing a kernel, the device program counter wrapped its address space. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_INVALID_PC = 718, + + /** + * An exception occurred on the device while executing a kernel. Common + * causes include dereferencing an invalid device pointer and accessing + * out of bounds shared memory. Less common cases can be system specific - more + * information about these cases can be found in the system specific user guide. 
+ * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_LAUNCH_FAILED = 719, + + /** + * This error indicates that the number of blocks launched per grid for a kernel that was + * launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice + * exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor + * or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors + * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. + */ + CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = 720, + + /** + * This error indicates that the attempted operation is not permitted. + */ + CUDA_ERROR_NOT_PERMITTED = 800, + + /** + * This error indicates that the attempted operation is not supported + * on the current system or device. + */ + CUDA_ERROR_NOT_SUPPORTED = 801, + + /** + * This error indicates that the system is not yet ready to start any CUDA + * work. To continue using CUDA, verify the system configuration is in a + * valid state and all required driver daemons are actively running. + * More information about this error can be found in the system specific + * user guide. + */ + CUDA_ERROR_SYSTEM_NOT_READY = 802, + + /** + * This error indicates that there is a mismatch between the versions of + * the display driver and the CUDA driver. Refer to the compatibility documentation + * for supported versions. + */ + CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803, + + /** + * This error indicates that the system was upgraded to run with forward compatibility + * but the visible hardware detected by CUDA does not support this configuration. + * Refer to the compatibility documentation for the supported hardware matrix or ensure + * that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES + * environment variable. + */ + CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = 804, + + /** + * This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + */ + CUDA_ERROR_MPS_CONNECTION_FAILED = 805, + + /** + * This error indicates that the remote procedural call between the MPS server and the MPS client failed. + */ + CUDA_ERROR_MPS_RPC_FAILURE = 806, + + /** + * This error indicates that the MPS server is not ready to accept new MPS client requests. + * This error can be returned when the MPS server is in the process of recovering from a fatal failure. + */ + CUDA_ERROR_MPS_SERVER_NOT_READY = 807, + + /** + * This error indicates that the hardware resources required to create MPS client have been exhausted. + */ + CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = 808, + + /** + * This error indicates the the hardware resources required to support device connections have been exhausted. + */ + CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = 809, + + /** + * This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. + */ + CUDA_ERROR_MPS_CLIENT_TERMINATED = 810, + + /** + * This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. + */ + CUDA_ERROR_CDP_NOT_SUPPORTED = 811, + + /** + * This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. 
+ */ + CUDA_ERROR_CDP_VERSION_MISMATCH = 812, + + /** + * This error indicates that the operation is not permitted when + * the stream is capturing. + */ + CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = 900, + + /** + * This error indicates that the current capture sequence on the stream + * has been invalidated due to a previous error. + */ + CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = 901, + + /** + * This error indicates that the operation would have resulted in a merge + * of two independent capture sequences. + */ + CUDA_ERROR_STREAM_CAPTURE_MERGE = 902, + + /** + * This error indicates that the capture was not initiated in this stream. + */ + CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = 903, + + /** + * This error indicates that the capture sequence contains a fork that was + * not joined to the primary stream. + */ + CUDA_ERROR_STREAM_CAPTURE_UNJOINED = 904, + + /** + * This error indicates that a dependency would have been created which + * crosses the capture sequence boundary. Only implicit in-stream ordering + * dependencies are allowed to cross the boundary. + */ + CUDA_ERROR_STREAM_CAPTURE_ISOLATION = 905, + + /** + * This error indicates a disallowed implicit dependency on a current capture + * sequence from cudaStreamLegacy. + */ + CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = 906, + + /** + * This error indicates that the operation is not permitted on an event which + * was last recorded in a capturing stream. + */ + CUDA_ERROR_CAPTURED_EVENT = 907, + + /** + * A stream capture sequence not initiated with the ::CU_STREAM_CAPTURE_MODE_RELAXED + * argument to ::cuStreamBeginCapture was passed to ::cuStreamEndCapture in a + * different thread. + */ + CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = 908, + + /** + * This error indicates that the timeout specified for the wait operation has lapsed. + */ + CUDA_ERROR_TIMEOUT = 909, + + /** + * This error indicates that the graph update was not performed because it included + * changes which violated constraints specific to instantiated graph update. + */ + CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = 910, + + /** + * This indicates that an async error has occurred in a device outside of CUDA. + * If CUDA was waiting for an external device's signal before consuming shared data, + * the external device signaled an error indicating that the data is not valid for + * consumption. This leaves the process in an inconsistent state and any further CUDA + * work will return the same error. To continue using CUDA, the process must be + * terminated and relaunched. + */ + CUDA_ERROR_EXTERNAL_DEVICE = 911, + + /** + * Indicates a kernel launch error due to cluster misconfiguration. + */ + CUDA_ERROR_INVALID_CLUSTER_SIZE = 912, + + /** + * This indicates that an unknown internal error has occurred. 
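+ * As a generic illustration of reporting any ::CUresult (including this one), a
+ * small helper using ::cuGetErrorName and ::cuGetErrorString (plus <stdio.h>)
+ * might look like:
+ * \code
+   void reportDriverError(CUresult err) {
+       if (err != CUDA_SUCCESS) {
+           const char *name = NULL, *desc = NULL;
+           cuGetErrorName(err, &name);    // e.g. "CUDA_ERROR_UNKNOWN"
+           cuGetErrorString(err, &desc);  // human-readable description
+           fprintf(stderr, "CUDA driver error %s: %s\n", name, desc);
+       }
+   }
+ * \endcode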
+ */ + CUDA_ERROR_UNKNOWN = 999 +} CUresult; + +/** + * P2P Attributes + */ +typedef enum CUdevice_P2PAttribute_enum { + CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 0x01, /**< A relative value indicating the performance of the link between two devices */ + CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 0x02, /**< P2P Access is enable */ + CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 0x03, /**< Atomic operation over the link supported */ + CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = 0x04, /**< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead */ + CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 0x04 /**< Accessing CUDA arrays over the link supported */ +} CUdevice_P2PAttribute; + +/** + * CUDA stream callback + * \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL. + * \param status ::CUDA_SUCCESS or any persistent error on the stream. + * \param userData User parameter provided at registration. + */ +typedef void (CUDA_CB *CUstreamCallback)(CUstream hStream, CUresult status, void *userData); + +/** + * Block size to per-block dynamic shared memory mapping for a certain + * kernel \param blockSize Block size of the kernel. + * + * \return The dynamic shared memory needed by a block. + */ +typedef size_t (CUDA_CB *CUoccupancyB2DSize)(int blockSize); + +/** + * If set, host memory is portable between CUDA contexts. + * Flag for ::cuMemHostAlloc() + */ +#define CU_MEMHOSTALLOC_PORTABLE 0x01 + +/** + * If set, host memory is mapped into CUDA address space and + * ::cuMemHostGetDevicePointer() may be called on the host pointer. + * Flag for ::cuMemHostAlloc() + */ +#define CU_MEMHOSTALLOC_DEVICEMAP 0x02 + +/** + * If set, host memory is allocated as write-combined - fast to write, + * faster to DMA, slow to read except via SSE4 streaming load instruction + * (MOVNTDQA). + * Flag for ::cuMemHostAlloc() + */ +#define CU_MEMHOSTALLOC_WRITECOMBINED 0x04 + +/** + * If set, host memory is portable between CUDA contexts. + * Flag for ::cuMemHostRegister() + */ +#define CU_MEMHOSTREGISTER_PORTABLE 0x01 + +/** + * If set, host memory is mapped into CUDA address space and + * ::cuMemHostGetDevicePointer() may be called on the host pointer. + * Flag for ::cuMemHostRegister() + */ +#define CU_MEMHOSTREGISTER_DEVICEMAP 0x02 + +/** + * If set, the passed memory pointer is treated as pointing to some + * memory-mapped I/O space, e.g. belonging to a third-party PCIe device. + * On Windows the flag is a no-op. + * On Linux that memory is marked as non cache-coherent for the GPU and + * is expected to be physically contiguous. It may return + * ::CUDA_ERROR_NOT_PERMITTED if run as an unprivileged user, + * ::CUDA_ERROR_NOT_SUPPORTED on older Linux kernel versions. + * On all other platforms, it is not supported and ::CUDA_ERROR_NOT_SUPPORTED + * is returned. + * Flag for ::cuMemHostRegister() + */ +#define CU_MEMHOSTREGISTER_IOMEMORY 0x04 + +/** +* If set, the passed memory pointer is treated as pointing to memory that is +* considered read-only by the device. On platforms without +* ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is +* required in order to register memory mapped to the CPU as read-only. Support +* for the use of this flag can be queried from the device attribute +* ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. 
Using this flag with +* a current context associated with a device that does not have this attribute +* set will cause ::cuMemHostRegister to error with ::CUDA_ERROR_NOT_SUPPORTED. +*/ +#define CU_MEMHOSTREGISTER_READ_ONLY 0x08 + +/** + * 2D memory copy parameters + */ +typedef struct CUDA_MEMCPY2D_st { + size_t srcXInBytes; /**< Source X in bytes */ + size_t srcY; /**< Source Y */ + + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + size_t srcPitch; /**< Source pitch (ignored when src is array) */ + + size_t dstXInBytes; /**< Destination X in bytes */ + size_t dstY; /**< Destination Y */ + + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ + + size_t WidthInBytes; /**< Width of 2D memory copy in bytes */ + size_t Height; /**< Height of 2D memory copy */ +} CUDA_MEMCPY2D_v2; +typedef CUDA_MEMCPY2D_v2 CUDA_MEMCPY2D; + +/** + * 3D memory copy parameters + */ +typedef struct CUDA_MEMCPY3D_st { + size_t srcXInBytes; /**< Source X in bytes */ + size_t srcY; /**< Source Y */ + size_t srcZ; /**< Source Z */ + size_t srcLOD; /**< Source LOD */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + void *reserved0; /**< Must be NULL */ + size_t srcPitch; /**< Source pitch (ignored when src is array) */ + size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ + + size_t dstXInBytes; /**< Destination X in bytes */ + size_t dstY; /**< Destination Y */ + size_t dstZ; /**< Destination Z */ + size_t dstLOD; /**< Destination LOD */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + void *reserved1; /**< Must be NULL */ + size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ + size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ + + size_t WidthInBytes; /**< Width of 3D memory copy in bytes */ + size_t Height; /**< Height of 3D memory copy */ + size_t Depth; /**< Depth of 3D memory copy */ +} CUDA_MEMCPY3D_v2; +typedef CUDA_MEMCPY3D_v2 CUDA_MEMCPY3D; + +/** + * 3D memory cross-context copy parameters + */ +typedef struct CUDA_MEMCPY3D_PEER_st { + size_t srcXInBytes; /**< Source X in bytes */ + size_t srcY; /**< Source Y */ + size_t srcZ; /**< Source Z */ + size_t srcLOD; /**< Source LOD */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + CUcontext srcContext; /**< Source context (ignored with srcMemoryType is ::CU_MEMORYTYPE_ARRAY) */ + size_t srcPitch; /**< Source pitch (ignored when src is array) */ + size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ + + size_t 
dstXInBytes; /**< Destination X in bytes */ + size_t dstY; /**< Destination Y */ + size_t dstZ; /**< Destination Z */ + size_t dstLOD; /**< Destination LOD */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + CUcontext dstContext; /**< Destination context (ignored with dstMemoryType is ::CU_MEMORYTYPE_ARRAY) */ + size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ + size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ + + size_t WidthInBytes; /**< Width of 3D memory copy in bytes */ + size_t Height; /**< Height of 3D memory copy */ + size_t Depth; /**< Depth of 3D memory copy */ +} CUDA_MEMCPY3D_PEER_v1; +typedef CUDA_MEMCPY3D_PEER_v1 CUDA_MEMCPY3D_PEER; + +/** + * Array descriptor + */ +typedef struct CUDA_ARRAY_DESCRIPTOR_st +{ + size_t Width; /**< Width of array */ + size_t Height; /**< Height of array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ +} CUDA_ARRAY_DESCRIPTOR_v2; +typedef CUDA_ARRAY_DESCRIPTOR_v2 CUDA_ARRAY_DESCRIPTOR; + +/** + * 3D array descriptor + */ +typedef struct CUDA_ARRAY3D_DESCRIPTOR_st +{ + size_t Width; /**< Width of 3D array */ + size_t Height; /**< Height of 3D array */ + size_t Depth; /**< Depth of 3D array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ + unsigned int Flags; /**< Flags */ +} CUDA_ARRAY3D_DESCRIPTOR_v2; +typedef CUDA_ARRAY3D_DESCRIPTOR_v2 CUDA_ARRAY3D_DESCRIPTOR; + +/** + * Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers + */ +#define CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL 0x1 + +/** + * CUDA array sparse properties + */ +typedef struct CUDA_ARRAY_SPARSE_PROPERTIES_st { + struct { + unsigned int width; /**< Width of sparse tile in elements */ + unsigned int height; /**< Height of sparse tile in elements */ + unsigned int depth; /**< Depth of sparse tile in elements */ + } tileExtent; + + /** + * First mip level at which the mip tail begins. + */ + unsigned int miptailFirstLevel; + /** + * Total size of the mip tail. 
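+ * For context, a sketch of how these properties are typically queried for a
+ * sparse CUDA array (hArray is assumed to have been created with the
+ * ::CUDA_ARRAY3D_SPARSE flag):
+ * \code
+   CUDA_ARRAY_SPARSE_PROPERTIES props;
+   cuArrayGetSparseProperties(&props, hArray);
+   // props.tileExtent describes the sparse tile shape; miptailFirstLevel and
+   // miptailSize describe the densely backed mip tail region.
+ * \endcode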
+ */ + unsigned long long miptailSize; + /** + * Flags will either be zero or ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL + */ + unsigned int flags; + unsigned int reserved[4]; +} CUDA_ARRAY_SPARSE_PROPERTIES_v1; +typedef CUDA_ARRAY_SPARSE_PROPERTIES_v1 CUDA_ARRAY_SPARSE_PROPERTIES; + +/** + * CUDA array memory requirements + */ +typedef struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st { + size_t size; /**< Total required memory size */ + size_t alignment; /**< alignment requirement */ + unsigned int reserved[4]; +} CUDA_ARRAY_MEMORY_REQUIREMENTS_v1; +typedef CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 CUDA_ARRAY_MEMORY_REQUIREMENTS; + +/** + * CUDA Resource descriptor + */ +typedef struct CUDA_RESOURCE_DESC_st +{ + CUresourcetype resType; /**< Resource type */ + + union { + struct { + CUarray hArray; /**< CUDA array */ + } array; + struct { + CUmipmappedArray hMipmappedArray; /**< CUDA mipmapped array */ + } mipmap; + struct { + CUdeviceptr devPtr; /**< Device pointer */ + CUarray_format format; /**< Array format */ + unsigned int numChannels; /**< Channels per array element */ + size_t sizeInBytes; /**< Size in bytes */ + } linear; + struct { + CUdeviceptr devPtr; /**< Device pointer */ + CUarray_format format; /**< Array format */ + unsigned int numChannels; /**< Channels per array element */ + size_t width; /**< Width of the array in elements */ + size_t height; /**< Height of the array in elements */ + size_t pitchInBytes; /**< Pitch between two rows in bytes */ + } pitch2D; + struct { + int reserved[32]; + } reserved; + } res; + + unsigned int flags; /**< Flags (must be zero) */ +} CUDA_RESOURCE_DESC_v1; +typedef CUDA_RESOURCE_DESC_v1 CUDA_RESOURCE_DESC; + +/** + * Texture descriptor + */ +typedef struct CUDA_TEXTURE_DESC_st { + CUaddress_mode addressMode[3]; /**< Address modes */ + CUfilter_mode filterMode; /**< Filter mode */ + unsigned int flags; /**< Flags */ + unsigned int maxAnisotropy; /**< Maximum anisotropy ratio */ + CUfilter_mode mipmapFilterMode; /**< Mipmap filter mode */ + float mipmapLevelBias; /**< Mipmap level bias */ + float minMipmapLevelClamp; /**< Mipmap minimum level clamp */ + float maxMipmapLevelClamp; /**< Mipmap maximum level clamp */ + float borderColor[4]; /**< Border Color */ + int reserved[12]; +} CUDA_TEXTURE_DESC_v1; +typedef CUDA_TEXTURE_DESC_v1 CUDA_TEXTURE_DESC; + +/** + * Resource view format + */ +typedef enum CUresourceViewFormat_enum +{ + CU_RES_VIEW_FORMAT_NONE = 0x00, /**< No resource view format (use underlying resource format) */ + CU_RES_VIEW_FORMAT_UINT_1X8 = 0x01, /**< 1 channel unsigned 8-bit integers */ + CU_RES_VIEW_FORMAT_UINT_2X8 = 0x02, /**< 2 channel unsigned 8-bit integers */ + CU_RES_VIEW_FORMAT_UINT_4X8 = 0x03, /**< 4 channel unsigned 8-bit integers */ + CU_RES_VIEW_FORMAT_SINT_1X8 = 0x04, /**< 1 channel signed 8-bit integers */ + CU_RES_VIEW_FORMAT_SINT_2X8 = 0x05, /**< 2 channel signed 8-bit integers */ + CU_RES_VIEW_FORMAT_SINT_4X8 = 0x06, /**< 4 channel signed 8-bit integers */ + CU_RES_VIEW_FORMAT_UINT_1X16 = 0x07, /**< 1 channel unsigned 16-bit integers */ + CU_RES_VIEW_FORMAT_UINT_2X16 = 0x08, /**< 2 channel unsigned 16-bit integers */ + CU_RES_VIEW_FORMAT_UINT_4X16 = 0x09, /**< 4 channel unsigned 16-bit integers */ + CU_RES_VIEW_FORMAT_SINT_1X16 = 0x0a, /**< 1 channel signed 16-bit integers */ + CU_RES_VIEW_FORMAT_SINT_2X16 = 0x0b, /**< 2 channel signed 16-bit integers */ + CU_RES_VIEW_FORMAT_SINT_4X16 = 0x0c, /**< 4 channel signed 16-bit integers */ + CU_RES_VIEW_FORMAT_UINT_1X32 = 0x0d, /**< 1 channel unsigned 32-bit integers */ + 
CU_RES_VIEW_FORMAT_UINT_2X32 = 0x0e, /**< 2 channel unsigned 32-bit integers */ + CU_RES_VIEW_FORMAT_UINT_4X32 = 0x0f, /**< 4 channel unsigned 32-bit integers */ + CU_RES_VIEW_FORMAT_SINT_1X32 = 0x10, /**< 1 channel signed 32-bit integers */ + CU_RES_VIEW_FORMAT_SINT_2X32 = 0x11, /**< 2 channel signed 32-bit integers */ + CU_RES_VIEW_FORMAT_SINT_4X32 = 0x12, /**< 4 channel signed 32-bit integers */ + CU_RES_VIEW_FORMAT_FLOAT_1X16 = 0x13, /**< 1 channel 16-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_2X16 = 0x14, /**< 2 channel 16-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_4X16 = 0x15, /**< 4 channel 16-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_1X32 = 0x16, /**< 1 channel 32-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_2X32 = 0x17, /**< 2 channel 32-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_4X32 = 0x18, /**< 4 channel 32-bit floating point */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = 0x19, /**< Block compressed 1 */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = 0x1a, /**< Block compressed 2 */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 0x1b, /**< Block compressed 3 */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = 0x1c, /**< Block compressed 4 unsigned */ + CU_RES_VIEW_FORMAT_SIGNED_BC4 = 0x1d, /**< Block compressed 4 signed */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = 0x1e, /**< Block compressed 5 unsigned */ + CU_RES_VIEW_FORMAT_SIGNED_BC5 = 0x1f, /**< Block compressed 5 signed */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = 0x20, /**< Block compressed 6 unsigned half-float */ + CU_RES_VIEW_FORMAT_SIGNED_BC6H = 0x21, /**< Block compressed 6 signed half-float */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = 0x22 /**< Block compressed 7 */ +} CUresourceViewFormat; + +/** + * Resource view descriptor + */ +typedef struct CUDA_RESOURCE_VIEW_DESC_st +{ + CUresourceViewFormat format; /**< Resource view format */ + size_t width; /**< Width of the resource view */ + size_t height; /**< Height of the resource view */ + size_t depth; /**< Depth of the resource view */ + unsigned int firstMipmapLevel; /**< First defined mipmap level */ + unsigned int lastMipmapLevel; /**< Last defined mipmap level */ + unsigned int firstLayer; /**< First layer index */ + unsigned int lastLayer; /**< Last layer index */ + unsigned int reserved[16]; +} CUDA_RESOURCE_VIEW_DESC_v1; +typedef CUDA_RESOURCE_VIEW_DESC_v1 CUDA_RESOURCE_VIEW_DESC; + +/** + * Size of tensor map descriptor + */ +#define CU_TENSOR_MAP_NUM_QWORDS 16 + +/** + * Tensor map descriptor. Requires compiler support for aligning to 64 bytes. 
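+ * For illustration only, a hedged sketch of encoding a 2D tiled descriptor with
+ * ::cuTensorMapEncodeTiled (argument order as understood from the driver API
+ * reference and worth re-checking; cols, rows and the device pointer dptr are
+ * placeholders):
+ * \code
+   CUtensorMap map;                                        // opaque, 64-byte aligned storage
+   cuuint64_t globalDim[2]     = {cols, rows};             // innermost dimension first
+   cuuint64_t globalStrides[1] = {cols * sizeof(float)};   // rank-1 strides, in bytes
+   cuuint32_t boxDim[2]        = {64, 64};                 // shared-memory tile shape
+   cuuint32_t elemStrides[2]   = {1, 1};
+   cuTensorMapEncodeTiled(&map, CU_TENSOR_MAP_DATA_TYPE_FLOAT32, 2, (void *)dptr,
+                          globalDim, globalStrides, boxDim, elemStrides,
+                          CU_TENSOR_MAP_INTERLEAVE_NONE, CU_TENSOR_MAP_SWIZZLE_NONE,
+                          CU_TENSOR_MAP_L2_PROMOTION_NONE,
+                          CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
+ * \endcode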
+ */ +typedef struct CUtensorMap_st { +#if __cplusplus >= 201103L + alignas(64) +#elif __STDC_VERSION__ >= 201112L + _Alignas(64) +#endif + cuuint64_t opaque[CU_TENSOR_MAP_NUM_QWORDS]; +} CUtensorMap; + +/** + * Tensor map data type + */ +typedef enum CUtensorMapDataType_enum { + CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, + CU_TENSOR_MAP_DATA_TYPE_UINT16, + CU_TENSOR_MAP_DATA_TYPE_UINT32, + CU_TENSOR_MAP_DATA_TYPE_INT32, + CU_TENSOR_MAP_DATA_TYPE_UINT64, + CU_TENSOR_MAP_DATA_TYPE_INT64, + CU_TENSOR_MAP_DATA_TYPE_FLOAT16, + CU_TENSOR_MAP_DATA_TYPE_FLOAT32, + CU_TENSOR_MAP_DATA_TYPE_FLOAT64, + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ +} CUtensorMapDataType; + +/** + * Tensor map interleave layout type + */ +typedef enum CUtensorMapInterleave_enum { + CU_TENSOR_MAP_INTERLEAVE_NONE = 0, + CU_TENSOR_MAP_INTERLEAVE_16B, + CU_TENSOR_MAP_INTERLEAVE_32B +} CUtensorMapInterleave; + +/** + * Tensor map swizzling mode of shared memory banks + */ +typedef enum CUtensorMapSwizzle_enum { + CU_TENSOR_MAP_SWIZZLE_NONE = 0, + CU_TENSOR_MAP_SWIZZLE_32B, + CU_TENSOR_MAP_SWIZZLE_64B, + CU_TENSOR_MAP_SWIZZLE_128B +} CUtensorMapSwizzle; + +/** + * Tensor map L2 promotion type + */ +typedef enum CUtensorMapL2promotion_enum { + CU_TENSOR_MAP_L2_PROMOTION_NONE = 0, + CU_TENSOR_MAP_L2_PROMOTION_L2_64B, + CU_TENSOR_MAP_L2_PROMOTION_L2_128B, + CU_TENSOR_MAP_L2_PROMOTION_L2_256B +} CUtensorMapL2promotion; + +/** + * Tensor map out-of-bounds fill type + */ +typedef enum CUtensorMapFloatOOBfill_enum { + CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0, + CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA +} CUtensorMapFloatOOBfill; + +/** + * GPU Direct v3 tokens + */ +typedef struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { + unsigned long long p2pToken; + unsigned int vaSpaceToken; +} CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1; +typedef CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 CUDA_POINTER_ATTRIBUTE_P2P_TOKENS; + +/** +* Access flags that specify the level of access the current context's device has +* on the memory referenced. +*/ +typedef enum CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = 0x0, /**< No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations */ + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = 0x1, /**< Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. 
*/ + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = 0x3 /**< Read-write access, the device has full read-write access to the memory */ +} CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS; + +/** + * Kernel launch parameters + */ +typedef struct CUDA_LAUNCH_PARAMS_st { + CUfunction function; /**< Kernel to launch */ + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + CUstream hStream; /**< Stream identifier */ + void **kernelParams; /**< Array of pointers to kernel parameters */ +} CUDA_LAUNCH_PARAMS_v1; +typedef CUDA_LAUNCH_PARAMS_v1 CUDA_LAUNCH_PARAMS; + +/** + * External memory handle types + */ +typedef enum CUexternalMemoryHandleType_enum { + /** + * Handle is an opaque file descriptor + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, + /** + * Handle is an opaque shared NT handle + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, + /** + * Handle is an opaque, globally shared handle + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + /** + * Handle is a D3D12 heap object + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, + /** + * Handle is a D3D12 committed resource + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, + /** + * Handle is a shared NT handle to a D3D11 resource + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, + /** + * Handle is a globally shared handle to a D3D11 resource + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, + /** + * Handle is an NvSciBuf object + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 +} CUexternalMemoryHandleType; + +/** + * Indicates that the external memory object is a dedicated resource + */ +#define CUDA_EXTERNAL_MEMORY_DEDICATED 0x1 + +/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS + * contains this flag, it indicates that signaling an external semaphore object + * should skip performing appropriate memory synchronization operations over all + * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, + * which otherwise are performed by default to ensure data coherency with other + * importers of the same NvSciBuf memory objects. + */ +#define CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC 0x01 + +/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS + * contains this flag, it indicates that waiting on an external semaphore object + * should skip performing appropriate memory synchronization operations over all + * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, + * which otherwise are performed by default to ensure data coherency with other + * importers of the same NvSciBuf memory objects. + */ +#define CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC 0x02 + +/** + * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this, + * it indicates that application needs signaler specific NvSciSyncAttr + * to be filled by ::cuDeviceGetNvSciSyncAttributes. 
+ */ +#define CUDA_NVSCISYNC_ATTR_SIGNAL 0x1 + +/** + * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this, + * it indicates that application needs waiter specific NvSciSyncAttr + * to be filled by ::cuDeviceGetNvSciSyncAttributes. + */ +#define CUDA_NVSCISYNC_ATTR_WAIT 0x2 +/** + * External memory handle descriptor + */ +typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { + /** + * Type of the handle + */ + CUexternalMemoryHandleType type; + union { + /** + * File descriptor referencing the memory object. Valid + * when type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD + */ + int fd; + /** + * Win32 handle referencing the semaphore object. Valid when + * type is one of the following: + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + * Exactly one of 'handle' and 'name' must be non-NULL. If + * type is one of the following: + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + * then 'name' must be NULL. + */ + struct { + /** + * Valid NT handle. Must be NULL if 'name' is non-NULL + */ + void *handle; + /** + * Name of a valid memory object. + * Must be NULL if 'handle' is non-NULL. + */ + const void *name; + } win32; + /** + * A handle representing an NvSciBuf Object. Valid when type + * is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF + */ + const void *nvSciBufObject; + } handle; + /** + * Size of the memory allocation + */ + unsigned long long size; + /** + * Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1; +typedef CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 CUDA_EXTERNAL_MEMORY_HANDLE_DESC; + +/** + * External memory buffer descriptor + */ +typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { + /** + * Offset into the memory object where the buffer's base is + */ + unsigned long long offset; + /** + * Size of the buffer + */ + unsigned long long size; + /** + * Flags reserved for future use. Must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1; +typedef CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 CUDA_EXTERNAL_MEMORY_BUFFER_DESC; + +/** + * External memory mipmap descriptor + */ +typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { + /** + * Offset into the memory object where the base level of the + * mipmap chain is. 
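+ * Once the fields below are populated, a hedged sketch of mapping the mipmap
+ * chain out of an imported memory object (extMem and arrayDesc are placeholders)
+ * might be:
+ * \code
+   CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC desc = {0};
+   desc.offset    = 0;
+   desc.arrayDesc = arrayDesc;  // CUDA_ARRAY3D_DESCRIPTOR matching the imported image
+   desc.numLevels = 1;
+   CUmipmappedArray mipmap;
+   cuExternalMemoryGetMappedMipmappedArray(&mipmap, extMem, &desc);
+ * \endcode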
+ */ + unsigned long long offset; + /** + * Format, dimension and type of base level of the mipmap chain + */ + CUDA_ARRAY3D_DESCRIPTOR arrayDesc; + /** + * Total number of levels in the mipmap chain + */ + unsigned int numLevels; + unsigned int reserved[16]; +} CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1; +typedef CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; + +/** + * External semaphore handle types + */ +typedef enum CUexternalSemaphoreHandleType_enum { + /** + * Handle is an opaque file descriptor + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, + /** + * Handle is an opaque shared NT handle + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, + /** + * Handle is an opaque, globally shared handle + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + /** + * Handle is a shared NT handle referencing a D3D12 fence object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, + /** + * Handle is a shared NT handle referencing a D3D11 fence object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, + /** + * Opaque handle to NvSciSync Object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, + /** + * Handle is a shared NT handle referencing a D3D11 keyed mutex object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, + /** + * Handle is a globally shared handle referencing a D3D11 keyed mutex object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, + /** + * Handle is an opaque file descriptor referencing a timeline semaphore + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, + /** + * Handle is an opaque shared NT handle referencing a timeline semaphore + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 +} CUexternalSemaphoreHandleType; + +/** + * External semaphore handle descriptor + */ +typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { + /** + * Type of the handle + */ + CUexternalSemaphoreHandleType type; + union { + /** + * File descriptor referencing the semaphore object. Valid + * when type is one of the following: + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD + */ + int fd; + /** + * Win32 handle referencing the semaphore object. Valid when + * type is one of the following: + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + * Exactly one of 'handle' and 'name' must be non-NULL. If + * type is one of the following: + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + * then 'name' must be NULL. + */ + struct { + /** + * Valid NT handle. Must be NULL if 'name' is non-NULL + */ + void *handle; + /** + * Name of a valid synchronization primitive. + * Must be NULL if 'handle' is non-NULL. + */ + const void *name; + } win32; + /** + * Valid NvSciSyncObj. Must be non NULL + */ + const void* nvSciSyncObj; + } handle; + /** + * Flags reserved for the future. Must be zero. 
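+ * As a minimal sketch of how this descriptor is typically consumed (assuming an
+ * opaque POSIX file descriptor; error handling omitted):
+ * \code
+   CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC desc = {0};
+   desc.type      = CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD;
+   desc.handle.fd = fd;  // ownership of fd transfers to the driver on success
+   CUexternalSemaphore extSem;
+   cuImportExternalSemaphore(&extSem, &desc);
+ * \endcode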
+ */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1; +typedef CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; + +/** + * External semaphore signal parameters + */ +typedef struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be signaled + */ + unsigned long long value; + } fence; + union { + /** + * Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType + * is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC. + */ + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /** + * Value of key to release the mutex with + */ + unsigned long long key; + } keyedMutex; + unsigned int reserved[12]; + } params; + /** + * Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to + * signal a ::CUexternalSemaphore of type + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates + * that while signaling the ::CUexternalSemaphore, no memory synchronization + * operations should be performed for any external memory object imported + * as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. + * For all other types of ::CUexternalSemaphore, flags must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1; +typedef CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS; + +/** + * External semaphore wait parameters + */ +typedef struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be waited on + */ + unsigned long long value; + } fence; + /** + * Pointer to NvSciSyncFence. Valid if CUexternalSemaphoreHandleType + * is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC. + */ + union { + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /** + * Value of key to acquire the mutex with + */ + unsigned long long key; + /** + * Timeout in milliseconds to wait to acquire the mutex + */ + unsigned int timeoutMs; + } keyedMutex; + unsigned int reserved[10]; + } params; + /** + * Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on + * a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + * the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + * which indicates that while waiting for the ::CUexternalSemaphore, no memory + * synchronization operations should be performed for any external memory + * object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. + * For all other types of ::CUexternalSemaphore, flags must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1; +typedef CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS; + +/** + * Semaphore signal node parameters + */ +typedef struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st { + CUexternalSemaphore* extSemArray; /**< Array of external semaphore handles. */ + const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* paramsArray; /**< Array of external semaphore signal parameters. */ + unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. 
*/ +} CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1; +typedef CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 CUDA_EXT_SEM_SIGNAL_NODE_PARAMS; + +/** + * Semaphore wait node parameters + */ +typedef struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st { + CUexternalSemaphore* extSemArray; /**< Array of external semaphore handles. */ + const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* paramsArray; /**< Array of external semaphore wait parameters. */ + unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. */ +} CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1; +typedef CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 CUDA_EXT_SEM_WAIT_NODE_PARAMS; + +typedef unsigned long long CUmemGenericAllocationHandle_v1; +typedef CUmemGenericAllocationHandle_v1 CUmemGenericAllocationHandle; + +/** + * Flags for specifying particular handle types + */ +typedef enum CUmemAllocationHandleType_enum { + CU_MEM_HANDLE_TYPE_NONE = 0x0, /**< Does not allow any export mechanism. > */ + CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = 0x1, /**< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) */ + CU_MEM_HANDLE_TYPE_WIN32 = 0x2, /**< Allows a Win32 NT handle to be used for exporting. (HANDLE) */ + CU_MEM_HANDLE_TYPE_WIN32_KMT = 0x4, /**< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) */ + CU_MEM_HANDLE_TYPE_MAX = 0x7FFFFFFF +} CUmemAllocationHandleType; + +/** + * Specifies the memory protection flags for mapping. + */ +typedef enum CUmemAccess_flags_enum { + CU_MEM_ACCESS_FLAGS_PROT_NONE = 0x0, /**< Default, make the address range not accessible */ + CU_MEM_ACCESS_FLAGS_PROT_READ = 0x1, /**< Make the address range read accessible */ + CU_MEM_ACCESS_FLAGS_PROT_READWRITE = 0x3, /**< Make the address range read-write accessible */ + CU_MEM_ACCESS_FLAGS_PROT_MAX = 0x7FFFFFFF +} CUmemAccess_flags; + +/** + * Specifies the type of location + */ +typedef enum CUmemLocationType_enum { + CU_MEM_LOCATION_TYPE_INVALID = 0x0, + CU_MEM_LOCATION_TYPE_DEVICE = 0x1, /**< Location is a device location, thus id is a device ordinal */ + CU_MEM_LOCATION_TYPE_MAX = 0x7FFFFFFF +} CUmemLocationType; + +/** +* Defines the allocation types available +*/ +typedef enum CUmemAllocationType_enum { + CU_MEM_ALLOCATION_TYPE_INVALID = 0x0, + + /** This allocation type is 'pinned', i.e. cannot migrate from its current + * location while the application is actively using it + */ + CU_MEM_ALLOCATION_TYPE_PINNED = 0x1, + CU_MEM_ALLOCATION_TYPE_MAX = 0x7FFFFFFF +} CUmemAllocationType; + +/** +* Flag for requesting different optimal and required granularities for an allocation. 
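+* For illustration, a common pattern is to query the granularity and round the
+* requested size up before calling ::cuMemCreate (prop is assumed to be a filled
+* ::CUmemAllocationProp and size the requested byte count):
+* \code
+   size_t gran = 0;
+   cuMemGetAllocationGranularity(&gran, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM);
+   size_t padded = ((size + gran - 1) / gran) * gran;  // round up to the granularity
+   CUmemGenericAllocationHandle handle;
+   cuMemCreate(&handle, padded, &prop, 0);
+* \endcode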
+*/ +typedef enum CUmemAllocationGranularity_flags_enum { + CU_MEM_ALLOC_GRANULARITY_MINIMUM = 0x0, /**< Minimum required granularity for allocation */ + CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = 0x1 /**< Recommended granularity for allocation for best performance */ +} CUmemAllocationGranularity_flags; + +/** +* Specifies the handle type for address range +*/ +typedef enum CUmemRangeHandleType_enum +{ + CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = 0x1, + CU_MEM_RANGE_HANDLE_TYPE_MAX = 0x7FFFFFFF +} CUmemRangeHandleType; + +/** + * Sparse subresource types + */ +typedef enum CUarraySparseSubresourceType_enum { + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 +} CUarraySparseSubresourceType; + +/** + * Memory operation types + */ +typedef enum CUmemOperationType_enum { + CU_MEM_OPERATION_TYPE_MAP = 1, + CU_MEM_OPERATION_TYPE_UNMAP = 2 +} CUmemOperationType; + +/** + * Memory handle types + */ +typedef enum CUmemHandleType_enum { + CU_MEM_HANDLE_TYPE_GENERIC = 0 +} CUmemHandleType; + +/** + * Specifies the CUDA array or CUDA mipmapped array memory mapping information + */ +typedef struct CUarrayMapInfo_st { + CUresourcetype resourceType; /**< Resource type */ + + union { + CUmipmappedArray mipmap; + CUarray array; + } resource; + + CUarraySparseSubresourceType subresourceType; /**< Sparse subresource type */ + + union { + struct { + unsigned int level; /**< For CUDA mipmapped arrays must a valid mipmap level. For CUDA arrays must be zero */ + unsigned int layer; /**< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero */ + unsigned int offsetX; /**< Starting X offset in elements */ + unsigned int offsetY; /**< Starting Y offset in elements */ + unsigned int offsetZ; /**< Starting Z offset in elements */ + unsigned int extentWidth; /**< Width in elements */ + unsigned int extentHeight; /**< Height in elements */ + unsigned int extentDepth; /**< Depth in elements */ + } sparseLevel; + struct { + unsigned int layer; /**< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero */ + unsigned long long offset; /**< Offset within mip tail */ + unsigned long long size; /**< Extent in bytes */ + } miptail; + } subresource; + + CUmemOperationType memOperationType; /**< Memory operation type */ + CUmemHandleType memHandleType; /**< Memory handle type */ + + union { + CUmemGenericAllocationHandle memHandle; + } memHandle; + + unsigned long long offset; /**< Offset within the memory */ + unsigned int deviceBitMask; /**< Device ordinal bit mask */ + unsigned int flags; /**< flags for future use, must be zero now. */ + unsigned int reserved[2]; /**< Reserved for future use, must be zero now. */ +} CUarrayMapInfo_v1; +typedef CUarrayMapInfo_v1 CUarrayMapInfo; + +/** + * Specifies a memory location. + */ +typedef struct CUmemLocation_st { + CUmemLocationType type; /**< Specifies the location type, which modifies the meaning of id. */ + int id; /**< identifier for a given this location's ::CUmemLocationType. */ +} CUmemLocation_v1; +typedef CUmemLocation_v1 CUmemLocation; + +/** + * Specifies compression attribute for an allocation. + */ +typedef enum CUmemAllocationCompType_enum { + CU_MEM_ALLOCATION_COMP_NONE = 0x0, /**< Allocating non-compressible memory */ + CU_MEM_ALLOCATION_COMP_GENERIC = 0x1 /**< Allocating compressible memory */ +} CUmemAllocationCompType; + +/** + * This flag if set indicates that the memory will be used as a tile pool. 
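+ * A hedged sketch of requesting a tile-pool allocation through the usage bitmask
+ * of the ::CUmemAllocationProp structure defined below (deviceOrdinal and
+ * poolSize are placeholders; poolSize must respect the allocation granularity):
+ * \code
+   CUmemAllocationProp prop = {0};
+   prop.type             = CU_MEM_ALLOCATION_TYPE_PINNED;
+   prop.location.type    = CU_MEM_LOCATION_TYPE_DEVICE;
+   prop.location.id      = deviceOrdinal;
+   prop.allocFlags.usage = CU_MEM_CREATE_USAGE_TILE_POOL;  // back sparse arrays from this allocation
+   CUmemGenericAllocationHandle pool;
+   cuMemCreate(&pool, poolSize, &prop, 0);
+ * \endcode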
+ */ +#define CU_MEM_CREATE_USAGE_TILE_POOL 0x1 + +/** +* Specifies the allocation properties for an allocation. +*/ +typedef struct CUmemAllocationProp_st { + /** Allocation type */ + CUmemAllocationType type; + /** requested ::CUmemAllocationHandleType */ + CUmemAllocationHandleType requestedHandleTypes; + /** Location of allocation */ + CUmemLocation location; + /** + * Windows-specific POBJECT_ATTRIBUTES required when + * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure + * includes security attributes that define + * the scope of which exported allocations may be transferred to other + * processes. In all other cases, this field is required to be zero. + */ + void *win32HandleMetaData; + struct { + /** + * Allocation hint for requesting compressible memory. + * On devices that support Compute Data Compression, compressible + * memory can be used to accelerate accesses to data with unstructured + * sparsity and other compressible data patterns. Applications are + * expected to query allocation property of the handle obtained with + * ::cuMemCreate using ::cuMemGetAllocationPropertiesFromHandle to + * validate if the obtained allocation is compressible or not. Note that + * compressed memory may not be mappable on all devices. + */ + unsigned char compressionType; + unsigned char gpuDirectRDMACapable; + /** Bitmask indicating intended usage for this allocation */ + unsigned short usage; + unsigned char reserved[4]; + } allocFlags; +} CUmemAllocationProp_v1; +typedef CUmemAllocationProp_v1 CUmemAllocationProp; + +/** +* Flags for querying different granularities for a multicast object +*/ +typedef enum CUmulticastGranularity_flags_enum { + CU_MULTICAST_GRANULARITY_MINIMUM = 0x0, /**< Minimum required granularity */ + CU_MULTICAST_GRANULARITY_RECOMMENDED = 0x1 /**< Recommended granularity for best performance */ +} CUmulticastGranularity_flags; + +/** +* Specifies the properties for a multicast object.
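+*
+* As an illustrative sketch only, the fields declared below might be populated
+* as follows before being passed to the multicast creation entry point, assumed
+* here to be ::cuMulticastCreate (declared later in this header).
+* \code
+    CUmulticastObjectProp mcProp = {0};
+    size_t objSize     = 64 << 20;          /* example; round up to the multicast granularity */
+    mcProp.numDevices  = 2;                 /* two devices will bind memory */
+    mcProp.size        = objSize;
+    mcProp.handleTypes = CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR;
+    CUmemGenericAllocationHandle mcHandle;
+    cuMulticastCreate(&mcHandle, &mcProp);  /* assumed API, see note above */
+* \endcode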
+*/ +typedef struct CUmulticastObjectProp_st { + /** + * The number of devices in the multicast team that will bind memory to this + * object + */ + unsigned int numDevices; + /** + * The maximum amount of memory that can be bound to this multicast object + * per device + */ + size_t size; + /** + * Bitmask of exportable handle types (see ::CUmemAllocationHandleType) for + * this object + */ + unsigned long long handleTypes; + /** + * Flags for future use, must be zero now + */ + unsigned long long flags; +} CUmulticastObjectProp_v1; +typedef CUmulticastObjectProp_v1 CUmulticastObjectProp; + +/** + * Memory access descriptor + */ +typedef struct CUmemAccessDesc_st { + CUmemLocation location; /**< Location on which the request is to change it's accessibility */ + CUmemAccess_flags flags; /**< ::CUmemProt accessibility flags to set on the request */ +} CUmemAccessDesc_v1; +typedef CUmemAccessDesc_v1 CUmemAccessDesc; + +/** + * CUDA Graph Update error types + */ +typedef enum CUgraphExecUpdateResult_enum { + CU_GRAPH_EXEC_UPDATE_SUCCESS = 0x0, /**< The update succeeded */ + CU_GRAPH_EXEC_UPDATE_ERROR = 0x1, /**< The update failed for an unexpected reason which is described in the return value of the function */ + CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = 0x2, /**< The update failed because the topology changed */ + CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = 0x3, /**< The update failed because a node type changed */ + CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = 0x4, /**< The update failed because the function of a kernel node changed (CUDA driver < 11.2) */ + CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = 0x5, /**< The update failed because the parameters changed in a way that is not supported */ + CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = 0x6, /**< The update failed because something about the node is not supported */ + CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = 0x7, /**< The update failed because the function of a kernel node changed in an unsupported way */ + CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = 0x8 /**< The update failed because the node attributes changed in a way that is not supported */ +} CUgraphExecUpdateResult; + +/** + * Result information returned by cuGraphExecUpdate + */ +typedef struct CUgraphExecUpdateResultInfo_st { + /** + * Gives more specific detail when a cuda graph update fails. + */ + CUgraphExecUpdateResult result; + + /** + * The "to node" of the error edge when the topologies do not match. + * The error node when the error is associated with a specific node. + * NULL when the error is generic. + */ + CUgraphNode errorNode; + + /** + * The from node of error edge when the topologies do not match. Otherwise NULL. + */ + CUgraphNode errorFromNode; +} CUgraphExecUpdateResultInfo_v1; +typedef CUgraphExecUpdateResultInfo_v1 CUgraphExecUpdateResultInfo; + +/** + * CUDA memory pool attributes + */ +typedef enum CUmemPool_attribute_enum { + /** + * (value type = int) + * Allow cuMemAllocAsync to use memory asynchronously freed + * in another streams as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + */ + CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = 1, + + /** + * (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. 
(default enabled) + */ + CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, + + /** + * (value type = int) + * Allow cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cuMemFreeAsync (default enabled). + */ + CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, + + /** + * (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + */ + CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, + + /** + * (value type = cuuint64_t) + * Amount of backing memory currently allocated for the mempool. + */ + CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of backing memory allocated for the mempool since the + * last time it was reset. High watermark can only be reset to zero. + */ + CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH, + + /** + * (value type = cuuint64_t) + * Amount of memory from the pool that is currently in use by the application. + */ + CU_MEMPOOL_ATTR_USED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of the amount of memory from the pool that was in use by the application since + * the last time it was reset. High watermark can only be reset to zero. + */ + CU_MEMPOOL_ATTR_USED_MEM_HIGH +} CUmemPool_attribute; + +/** + * Specifies the properties of allocations made from the pool. + */ +typedef struct CUmemPoolProps_st { + CUmemAllocationType allocType; /**< Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED */ + CUmemAllocationHandleType handleTypes; /**< Handle types that will be supported by allocations from the pool. */ + CUmemLocation location; /**< Location where allocations should reside. */ + /** + * Windows-specific LPSECURITYATTRIBUTES required when + * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines + * the scope of which exported allocations may be transferred to other + * processes. In all other cases, this field is required to be zero. + */ + void *win32SecurityAttributes; + unsigned char reserved[64]; /**< reserved for future use, must be 0 */ +} CUmemPoolProps_v1; +typedef CUmemPoolProps_v1 CUmemPoolProps; + +/** + * Opaque data for exporting a pool allocation + */ +typedef struct CUmemPoolPtrExportData_st { + unsigned char reserved[64]; +} CUmemPoolPtrExportData_v1; +typedef CUmemPoolPtrExportData_v1 CUmemPoolPtrExportData; + +/** + * Memory allocation node parameters + */ +typedef struct CUDA_MEM_ALLOC_NODE_PARAMS_st { + /** + * in: location where the allocation should reside (specified in ::location). + * ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported. + */ + CUmemPoolProps poolProps; + const CUmemAccessDesc *accessDescs; /**< in: array of memory access descriptors. Used to describe peer GPU access */ + size_t accessDescCount; /**< in: number of memory access descriptors. Must not exceed the number of GPUs.
*/ + size_t bytesize; /**< in: size in bytes of the requested allocation */ + CUdeviceptr dptr; /**< out: address of the allocation returned by CUDA */ +} CUDA_MEM_ALLOC_NODE_PARAMS; + +typedef enum CUgraphMem_attribute_enum { + /** + * (value type = cuuint64_t) + * Amount of memory, in bytes, currently associated with graphs + */ + CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + */ + CU_GRAPH_MEM_ATTR_USED_MEM_HIGH, + + /** + * (value type = cuuint64_t) + * Amount of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + */ + CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + */ + CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH +} CUgraphMem_attribute; + +/** + * If set, each kernel launched as part of ::cuLaunchCooperativeKernelMultiDevice only + * waits for prior work in the stream corresponding to that GPU to complete before the + * kernel begins execution. + */ +#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC 0x01 + +/** + * If set, any subsequent work pushed in a stream that participated in a call to + * ::cuLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on + * the GPU corresponding to that stream to complete before it begins execution. + */ +#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC 0x02 + +/** + * If set, the CUDA array is a collection of layers, where each layer is either a 1D + * or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number + * of layers, not the depth of a 3D array. + */ +#define CUDA_ARRAY3D_LAYERED 0x01 + +/** + * Deprecated, use CUDA_ARRAY3D_LAYERED + */ +#define CUDA_ARRAY3D_2DARRAY 0x01 + +/** + * This flag must be set in order to bind a surface reference + * to the CUDA array + */ +#define CUDA_ARRAY3D_SURFACE_LDST 0x02 + +/** + * If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The + * width of such a CUDA array must be equal to its height, and Depth must be six. + * If ::CUDA_ARRAY3D_LAYERED flag is also set, then the CUDA array is a collection of cubemaps + * and Depth must be a multiple of six. + */ +#define CUDA_ARRAY3D_CUBEMAP 0x04 + +/** + * This flag must be set in order to perform texture gather operations + * on a CUDA array. + */ +#define CUDA_ARRAY3D_TEXTURE_GATHER 0x08 + +/** + * This flag if set indicates that the CUDA + * array is a DEPTH_TEXTURE. + */ +#define CUDA_ARRAY3D_DEPTH_TEXTURE 0x10 + +/** + * This flag indicates that the CUDA array may be bound as a color target + * in an external graphics API + */ +#define CUDA_ARRAY3D_COLOR_ATTACHMENT 0x20 + +/** + * This flag if set indicates that the CUDA array or CUDA mipmapped array + * is a sparse CUDA array or CUDA mipmapped array respectively + */ +#define CUDA_ARRAY3D_SPARSE 0x40 + +/** + * This flag if set indicates that the CUDA array or CUDA mipmapped array + * will allow deferred memory mapping + */ +#define CUDA_ARRAY3D_DEFERRED_MAPPING 0x80 + +/** + * Override the texref format with a format inferred from the array. + * Flag for ::cuTexRefSetArray() + */ +#define CU_TRSA_OVERRIDE_FORMAT 0x01 + +/** + * Read the texture as integers rather than promoting the values to floats + * in the range [0,1]. 
+ * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_READ_AS_INTEGER 0x01 + +/** + * Use normalized texture coordinates in the range [0,1) instead of [0,dim). + * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_NORMALIZED_COORDINATES 0x02 + +/** + * Perform sRGB->linear conversion during texture read. + * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_SRGB 0x10 + + /** + * Disable any trilinear filtering optimizations. + * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION 0x20 + +/** + * Enable seamless cube map filtering. + * Flag for ::cuTexObjectCreate() + */ +#define CU_TRSF_SEAMLESS_CUBEMAP 0x40 + +/** + * C++ compile time constant for CU_LAUNCH_PARAM_END + */ +#define CU_LAUNCH_PARAM_END_AS_INT 0x00 + +/** + * End of array terminator for the \p extra parameter to + * ::cuLaunchKernel + */ +#define CU_LAUNCH_PARAM_END ((void*)CU_LAUNCH_PARAM_END_AS_INT) + +/** + * C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER + */ +#define CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT 0x01 + +/** + * Indicator that the next value in the \p extra parameter to + * ::cuLaunchKernel will be a pointer to a buffer containing all kernel + * parameters used for launching kernel \p f. This buffer needs to + * honor all alignment/padding requirements of the individual parameters. + * If ::CU_LAUNCH_PARAM_BUFFER_SIZE is not also specified in the + * \p extra array, then ::CU_LAUNCH_PARAM_BUFFER_POINTER will have no + * effect. + */ +#define CU_LAUNCH_PARAM_BUFFER_POINTER ((void*)CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT) + +/** + * C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE + */ +#define CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT 0x02 + +/** + * Indicator that the next value in the \p extra parameter to + * ::cuLaunchKernel will be a pointer to a size_t which contains the + * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER. + * It is required that ::CU_LAUNCH_PARAM_BUFFER_POINTER also be specified + * in the \p extra array if the value associated with + * ::CU_LAUNCH_PARAM_BUFFER_SIZE is not zero. + */ +#define CU_LAUNCH_PARAM_BUFFER_SIZE ((void*)CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT) + +/** + * For texture references loaded into the module, use default texunit from + * texture reference. + */ +#define CU_PARAM_TR_DEFAULT -1 + +/** + * Device that represents the CPU + */ +#define CU_DEVICE_CPU ((CUdevice)-1) + +/** + * Device that represents an invalid device + */ +#define CU_DEVICE_INVALID ((CUdevice)-2) + +/** + * Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS + */ +typedef enum CUflushGPUDirectRDMAWritesOptions_enum { + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = 1<<0, /**< ::cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device. */ + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = 1<<1 /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. */ +} CUflushGPUDirectRDMAWritesOptions; + +/** + * Platform native ordering for GPUDirect RDMA writes + */ +typedef enum CUGPUDirectRDMAWritesOrdering_enum { + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = 0, /**< The device does not natively support ordering of remote writes. ::cuFlushGPUDirectRDMAWrites() can be leveraged if supported. 
*/ + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = 100, /**< Natively, the device can consistently consume remote writes, although other CUDA devices may not. */ + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = 200 /**< Any CUDA device in the system can consistently consume remote writes to this device. */ +} CUGPUDirectRDMAWritesOrdering; + +/** + * The scopes for ::cuFlushGPUDirectRDMAWrites + */ +typedef enum CUflushGPUDirectRDMAWritesScope_enum { + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = 100, /**< Blocks until remote writes are visible to the CUDA device context owning the data. */ + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = 200 /**< Blocks until remote writes are visible to all CUDA device contexts. */ +} CUflushGPUDirectRDMAWritesScope; + +/** + * The targets for ::cuFlushGPUDirectRDMAWrites + */ +typedef enum CUflushGPUDirectRDMAWritesTarget_enum { + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = 0 /**< Sets the target for ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context. */ +} CUflushGPUDirectRDMAWritesTarget; + +/** + * The additional write options for ::cuGraphDebugDotPrint + */ +typedef enum CUgraphDebugDot_flags_enum { + CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = 1<<0, /**< Output all debug data as if every debug flag is enabled */ + CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = 1<<1, /**< Use CUDA Runtime structures for output */ + CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = 1<<2, /**< Adds CUDA_KERNEL_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = 1<<3, /**< Adds CUDA_MEMCPY3D values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = 1<<4, /**< Adds CUDA_MEMSET_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = 1<<5, /**< Adds CUDA_HOST_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = 1<<6, /**< Adds CUevent handle from record and wait nodes to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = 1<<7, /**< Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = 1<<8, /**< Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = 1<<9, /**< Adds CUkernelNodeAttrValue values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = 1<<10, /**< Adds node handles and every kernel function handle to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = 1<<11, /**< Adds memory alloc node parameters to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = 1<<12, /**< Adds memory free node parameters to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS = 1<<13 /**< Adds batch mem op node parameters to output */ + , CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO = 1<<14 /**< Adds edge numbering information */ +} CUgraphDebugDot_flags; + +/** + * Flags for user objects for graphs + */ +typedef enum CUuserObject_flags_enum { + CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = 1 /**< Indicates the destructor execution is not synchronized by any CUDA handle. */ +} CUuserObject_flags; + +/** + * Flags for retaining user object references for graphs + */ +typedef enum CUuserObjectRetain_flags_enum { + CU_GRAPH_USER_OBJECT_MOVE = 1 /**< Transfer references from the caller rather than creating new references. 
*/ +} CUuserObjectRetain_flags; + +/** + * Flags for instantiating a graph + */ +typedef enum CUgraphInstantiate_flags_enum { + CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = 1 /**< Automatically free memory allocated in a graph before relaunching. */ + , CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD = 2 /**< Automatically upload the graph after instantiaton. */ + , CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH = 4 /**< Instantiate the graph to be launchable from the device. */ + , CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY = 8 /**< Run the graph using the per-node priority attributes rather than the + priority of the stream it is launched into. */ +} CUgraphInstantiate_flags; + +/** @} */ /* END CUDA_TYPES */ + +#if defined(__GNUC__) + #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT) + #pragma GCC visibility push(default) + #endif +#endif + +#ifdef _WIN32 +#define CUDAAPI __stdcall +#else +#define CUDAAPI +#endif + +/** + * \defgroup CUDA_ERROR Error Handling + * + * ___MANBRIEF___ error handling functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the error handling functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Gets the string description of an error code + * + * Sets \p *pStr to the address of a NULL-terminated string description + * of the error code \p error. + * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE + * will be returned and \p *pStr will be set to the NULL address. + * + * \param error - Error code to convert to string + * \param pStr - Address of the string pointer. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::CUresult, + * ::cudaGetErrorString + */ +CUresult CUDAAPI cuGetErrorString(CUresult error, const char **pStr); + +/** + * \brief Gets the string representation of an error code enum name + * + * Sets \p *pStr to the address of a NULL-terminated string representation + * of the name of the enum error code \p error. + * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE + * will be returned and \p *pStr will be set to the NULL address. + * + * \param error - Error code to convert to string + * \param pStr - Address of the string pointer. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::CUresult, + * ::cudaGetErrorName + */ +CUresult CUDAAPI cuGetErrorName(CUresult error, const char **pStr); + +/** @} */ /* END CUDA_ERROR */ + +/** + * \defgroup CUDA_INITIALIZE Initialization + * + * ___MANBRIEF___ initialization functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the initialization functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Initialize the CUDA driver API + * Initializes the driver API and must be called before any other function from + * the driver API in the current process. Currently, the \p Flags parameter must be 0. If ::cuInit() + * has not been called, any function from the driver API will return + * ::CUDA_ERROR_NOT_INITIALIZED. + * + * \param Flags - Initialization flag for CUDA. 
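+ *
+ * As an illustrative sketch only, a minimal initialization sequence that
+ * combines ::cuInit with the error-handling and version-query functions
+ * documented in this header (printing assumes <stdio.h>):
+ * \code
+   CUresult status = cuInit(0);             /* Flags must currently be 0 */
+   if (status != CUDA_SUCCESS) {
+       const char *name = NULL;
+       cuGetErrorName(status, &name);       /* e.g. "CUDA_ERROR_NO_DEVICE" */
+       fprintf(stderr, "cuInit failed: %s\n", name ? name : "unknown");
+       return status;
+   }
+   int version = 0;
+   cuDriverGetVersion(&version);            /* e.g. 12020 */
+   int major = version / 1000;              /* 12020 -> 12 */
+   int minor = (version % 1000) / 10;       /* 12020 -> 2  */
+ * \endcode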
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH, + * ::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE + * \notefnerr + */ +CUresult CUDAAPI cuInit(unsigned int Flags); + +/** @} */ /* END CUDA_INITIALIZE */ + +/** + * \defgroup CUDA_VERSION Version Management + * + * ___MANBRIEF___ version management functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the version management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns the latest CUDA version supported by driver + * + * Returns in \p *driverVersion the version of CUDA supported by + * the driver. The version is returned as + * (1000 × major + 10 × minor). For example, CUDA 9.2 + * would be represented by 9020. + * + * This function automatically returns ::CUDA_ERROR_INVALID_VALUE if + * \p driverVersion is NULL. + * + * \param driverVersion - Returns the CUDA driver version + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cudaDriverGetVersion, + * ::cudaRuntimeGetVersion + */ +CUresult CUDAAPI cuDriverGetVersion(int *driverVersion); + +/** @} */ /* END CUDA_VERSION */ + +/** + * \defgroup CUDA_DEVICE Device Management + * + * ___MANBRIEF___ device management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the device management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns a handle to a compute device + * + * Returns in \p *device a device handle given an ordinal in the range [0, + * ::cuDeviceGetCount()-1]. + * + * \param device - Returned device handle + * \param ordinal - Device number to get handle for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGetLuid, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport + */ +CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal); + +/** + * \brief Returns the number of compute-capable devices + * + * Returns in \p *count the number of devices with compute capability greater + * than or equal to 2.0 that are available for execution. If there is no such + * device, ::cuDeviceGetCount() returns 0. + * + * \param count - Returned number of compute-capable devices + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGetLuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceCount + */ +CUresult CUDAAPI cuDeviceGetCount(int *count); + +/** + * \brief Returns an identifier string for the device + * + * Returns an ASCII string identifying the device \p dev in the NULL-terminated + * string pointed to by \p name. \p len specifies the maximum length of the + * string that may be returned. 
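+ *
+ * As an illustrative sketch only (printing assumes <stdio.h>), using
+ * ::cuDeviceGet to obtain the handle first:
+ * \code
+   CUdevice dev;
+   char name[256];
+   cuDeviceGet(&dev, 0);                    /* device ordinal 0 */
+   cuDeviceGetName(name, (int)sizeof(name), dev);
+   printf("Device 0: %s\n", name);
+ * \endcode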
+ * + * \param name - Returned identifier string for the device + * \param len - Maximum length of string to store in \p name + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetUuid, + * ::cuDeviceGetLuid, + * ::cuDeviceGetCount, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetName(char *name, int len, CUdevice dev); + +/** + * \brief Return an UUID for the device + * + * Note there is a later version of this API, ::cuDeviceGetUuid_v2. It will + * supplant this version in 12.0, which is retained for minor version compatibility. + * + * Returns 16-octets identifying the device \p dev in the structure + * pointed by the \p uuid. + * + * \param uuid - Returned UUID + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetUuid_v2 + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetLuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetUuid(CUuuid *uuid, CUdevice dev); + +/** + * \brief Return an UUID for the device (11.4+) + * + * Returns 16-octets identifying the device \p dev in the structure + * pointed by the \p uuid. If the device is in MIG mode, returns its + * MIG UUID which uniquely identifies the subscribed MIG compute instance. + * + * \param uuid - Returned UUID + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetLuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetUuid_v2(CUuuid *uuid, CUdevice dev); + +/** + * \brief Return an LUID and device node mask for the device + * + * Return identifying information (\p luid and \p deviceNodeMask) to allow + * matching device with graphics APIs. + * + * \param luid - Returned LUID + * \param deviceNodeMask - Returned device node mask + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetLuid(char *luid, unsigned int *deviceNodeMask, CUdevice dev); + +/** + * \brief Returns the total amount of memory on the device + * + * Returns in \p *bytes the total amount of memory available on the device + * \p dev in bytes. 
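+ *
+ * As an illustrative sketch only, where \p dev is assumed to have been obtained
+ * with ::cuDeviceGet and printing assumes <stdio.h>:
+ * \code
+   size_t totalBytes = 0;
+   cuDeviceTotalMem(&totalBytes, dev);
+   printf("Total device memory: %.2f GiB\n",
+          totalBytes / (1024.0 * 1024.0 * 1024.0));
+ * \endcode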
+ * + * \param bytes - Returned memory available on device in bytes + * \param dev - Device handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaMemGetInfo + */ +CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev); + +/** + * \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size. + * + * Returns in \p maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture + * for given \p format and \p numChannels. + * + * \param maxWidthInElements - Returned maximum number of texture elements allocatable for given \p format and \p numChannels. + * \param format - Texture format. + * \param numChannels - Number of channels per texture element. + * \param dev - Device handle. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cudaMemGetInfo, + * ::cuDeviceTotalMem + */ +CUresult CUDAAPI cuDeviceGetTexture1DLinearMaxWidth(size_t *maxWidthInElements, CUarray_format format, unsigned numChannels, CUdevice dev); + +/** + * \brief Returns information about the device + * + * Returns in \p *pi the integer value of the attribute \p attrib on device + * \p dev. 
The supported attributes are: + * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: Maximum number of threads per + * block; + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: Maximum x-dimension of a block + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: Maximum y-dimension of a block + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: Maximum z-dimension of a block + * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: Maximum x-dimension of a grid + * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: Maximum y-dimension of a grid + * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: Maximum z-dimension of a grid + * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: Maximum amount of + * shared memory available to a thread block in bytes + * - ::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: Memory available on device for + * __constant__ variables in a CUDA C kernel in bytes + * - ::CU_DEVICE_ATTRIBUTE_WARP_SIZE: Warp size in threads + * - ::CU_DEVICE_ATTRIBUTE_MAX_PITCH: Maximum pitch in bytes allowed by the + * memory copy functions that involve memory regions allocated through + * ::cuMemAllocPitch() + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: Maximum 1D + * texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: Maximum width + * for a 1D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: Maximum + * mipmapped 1D texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: Maximum 2D + * texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: Maximum 2D + * texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: Maximum width + * for a 2D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: Maximum height + * for a 2D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: Maximum pitch + * in bytes for a 2D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: Maximum + * mipmapped 2D texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: Maximum + * mipmapped 2D texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: Maximum 3D + * texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: Maximum 3D + * texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: Maximum 3D + * texture depth + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: + * Alternate maximum 3D texture width, 0 if no alternate + * maximum 3D texture size is supported + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: + * Alternate maximum 3D texture height, 0 if no alternate + * maximum 3D texture size is supported + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: + * Alternate maximum 3D texture depth, 0 if no alternate + * maximum 3D texture size is supported + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: + * Maximum cubemap texture width or height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: + * Maximum 1D layered texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: + * Maximum layers in a 1D layered texture + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: + * Maximum 2D layered texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: + * Maximum 2D layered texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: + * Maximum layers in a 2D layered texture + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: + * Maximum cubemap layered 
texture width or height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: + * Maximum layers in a cubemap layered texture + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: + * Maximum 1D surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: + * Maximum 2D surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: + * Maximum 2D surface height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: + * Maximum 3D surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: + * Maximum 3D surface height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: + * Maximum 3D surface depth + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: + * Maximum 1D layered surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: + * Maximum layers in a 1D layered surface + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: + * Maximum 2D layered surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: + * Maximum 2D layered surface height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: + * Maximum layers in a 2D layered surface + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: + * Maximum cubemap surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: + * Maximum cubemap layered surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: + * Maximum layers in a cubemap layered surface + * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: Maximum number of 32-bit + * registers available to a thread block + * - ::CU_DEVICE_ATTRIBUTE_CLOCK_RATE: The typical clock frequency in kilohertz + * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: Alignment requirement; texture + * base addresses aligned to ::textureAlign bytes do not need an offset + * applied to texture fetches + * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: Pitch alignment requirement + * for 2D texture references bound to pitched memory + * - ::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: 1 if the device can concurrently copy + * memory between host and device while executing a kernel, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: Number of multiprocessors on + * the device + * - ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: 1 if there is a run time limit + * for kernels executed on the device, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_INTEGRATED: 1 if the device is integrated with the + * memory subsystem, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: 1 if the device can map host + * memory into the CUDA address space, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: Compute mode that device is currently + * in. Available modes are as follows: + * - ::CU_COMPUTEMODE_DEFAULT: Default mode - Device is not restricted and + * can have multiple CUDA contexts present at a single time. + * - ::CU_COMPUTEMODE_PROHIBITED: Compute-prohibited mode - Device is + * prohibited from creating new CUDA contexts. + * - ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS: Compute-exclusive-process mode - Device + * can have only one context used by a single process at a time. + * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: 1 if the device supports + * executing multiple kernels within the same context simultaneously, or 0 if + * not. It is not guaranteed that multiple kernels will be resident + * on the device concurrently so this feature should not be relied upon for + * correctness. 
+ * - ::CU_DEVICE_ATTRIBUTE_ECC_ENABLED: 1 if error correction is enabled on the + * device, 0 if error correction is disabled or not supported by the device + * - ::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: PCI bus identifier of the device + * - ::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: PCI device (also known as slot) identifier + * of the device + * - ::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: PCI domain identifier of the device + * - ::CU_DEVICE_ATTRIBUTE_TCC_DRIVER: 1 if the device is using a TCC driver. TCC + * is only available on Tesla hardware running Windows Vista or later + * - ::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: Peak memory clock frequency in kilohertz + * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: Global memory bus width in bits + * - ::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: Size of L2 cache in bytes. 0 if the device doesn't have L2 cache + * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: Maximum resident threads per multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: 1 if the device shares a unified address space with + * the host, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: Major compute capability version number + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: Minor compute capability version number + * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: 1 if device supports caching globals + * in L1 cache, 0 if caching globals in L1 cache is not supported by the device + * - ::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: 1 if device supports caching locals + * in L1 cache, 0 if caching locals in L1 cache is not supported by the device + * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: Maximum amount of + * shared memory available to a multiprocessor in bytes; this amount is shared + * by all thread blocks simultaneously resident on a multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: Maximum number of 32-bit + * registers available to a multiprocessor; this number is shared by all thread + * blocks simultaneously resident on a multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: 1 if device supports allocating managed memory + * on this system, 0 if allocating managed memory is not supported by the device on this system. + * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: 1 if device is on a multi-GPU board, 0 if not. + * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: Unique identifier for a group of devices + * associated with the same board. Devices on the same multi-GPU board will share the same identifier. + * - ::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: 1 if Link between the device and the host + * supports native atomic operations. + * - ::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: Ratio of single precision performance + * (in floating-point operations per second) to double precision performance. + * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: Device supports coherently accessing + * pageable memory without calling cudaHostRegister on it. + * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: Device can coherently access managed memory + * concurrently with the CPU. + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: Device supports Compute Preemption. + * - ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: Device can access host registered + * memory at the same virtual address as the CPU. + * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: The maximum per block shared memory size + * supported on this device. 
This is the maximum value that can be opted into when using the cuFuncSetAttribute() or cuKernelSetAttribute() call. + * For more details see ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES + * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: Device accesses pageable memory via the host's + * page tables. + * - ::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: The host can directly access managed memory on the device without migration. + * - ::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs + * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: Maximum number of thread blocks that can reside on a multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: Device supports compressible memory allocation via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: Maximum L2 persisting lines capacity setting in bytes + * - ::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: Maximum value of CUaccessPolicyWindow::num_bytes + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate. + * - ::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: Amount of shared memory per block reserved by CUDA driver in bytes + * - ::CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays. + * - ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTERGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU + * - ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. + * - ::CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: Bitmask of handle types supported with mempool based IPC + * - ::CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED: Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays. 
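+ *
+ * As an illustrative sketch only, querying two of the attributes listed above
+ * for a device handle \p dev obtained with ::cuDeviceGet:
+ * \code
+   int smCount = 0, maxThreadsPerBlock = 0;
+   cuDeviceGetAttribute(&smCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
+   cuDeviceGetAttribute(&maxThreadsPerBlock,
+                        CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, dev);
+ * \endcode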
+ * + * \param pi - Returned device attribute value + * \param attrib - Device attribute to query + * \param dev - Device handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaDeviceGetAttribute, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev); + +/** + * \brief Return NvSciSync attributes that this device can support. + * + * Returns in \p nvSciSyncAttrList, the properties of NvSciSync that + * this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList + * can be used to create an NvSciSync object that matches this device's capabilities. + * + * If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is + * already set this API will return ::CUDA_ERROR_INVALID_VALUE. + * + * The applications should set \p nvSciSyncAttrList to a valid + * NvSciSyncAttrList failing which this API will return + * ::CUDA_ERROR_INVALID_HANDLE. + * + * The \p flags controls how applications intends to use + * the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are: + * - ::CUDA_NVSCISYNC_ATTR_SIGNAL, specifies that the applications intends to + * signal an NvSciSync on this CUDA device. + * - ::CUDA_NVSCISYNC_ATTR_WAIT, specifies that the applications intends to + * wait on an NvSciSync on this CUDA device. + * + * At least one of these flags must be set, failing which the API + * returns ::CUDA_ERROR_INVALID_VALUE. Both the flags are orthogonal + * to one another: a developer may set both these flags that allows to + * set both wait and signal specific attributes in the same \p nvSciSyncAttrList. + * + * Note that this API updates the input \p nvSciSyncAttrList with values equivalent + * to the following public attribute key-values: + * NvSciSyncAttrKey_RequiredPerm is set to + * - NvSciSyncAccessPerm_SignalOnly if ::CUDA_NVSCISYNC_ATTR_SIGNAL is set in \p flags. + * - NvSciSyncAccessPerm_WaitOnly if ::CUDA_NVSCISYNC_ATTR_WAIT is set in \p flags. + * - NvSciSyncAccessPerm_WaitSignal if both ::CUDA_NVSCISYNC_ATTR_WAIT and + * ::CUDA_NVSCISYNC_ATTR_SIGNAL are set in \p flags. + * NvSciSyncAttrKey_PrimitiveInfo is set to + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid \p device. + * - NvSciSyncAttrValPrimitiveType_Syncpoint if \p device is a Tegra device. + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if \p device is GA10X+. + * NvSciSyncAttrKey_GpuId is set to the same UUID that is returned for this + * \p device from ::cuDeviceGetUuid. + * + * \param nvSciSyncAttrList - Return NvSciSync attributes supported. + * \param dev - Valid Cuda Device to get NvSciSync attributes for. + * \param flags - flags describing NvSciSync usage. 
+ * + * \return + * + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa + * ::cuImportExternalSemaphore, + * ::cuDestroyExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuDeviceGetNvSciSyncAttributes(void *nvSciSyncAttrList, CUdevice dev, int flags); + +/** + * \brief Sets the current memory pool of a device + * + * The memory pool must be local to the specified device. + * ::cuMemAllocAsync allocates from the current mempool of the provided stream's device. + * By default, a device's current memory pool is its default memory pool. + * + * \note Use ::cuMemAllocFromPoolAsync to specify asynchronous allocations from a device different + * than the one the stream runs on. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolDestroy, ::cuMemAllocFromPoolAsync + */ +CUresult CUDAAPI cuDeviceSetMemPool(CUdevice dev, CUmemoryPool pool); + +/** + * \brief Gets the current mempool for a device + * + * Returns the last pool provided to ::cuDeviceSetMemPool for this device + * or the device's default memory pool if ::cuDeviceSetMemPool has never been called. + * By default the current mempool is the default mempool for a device. + * Otherwise the returned pool must have been set with ::cuDeviceSetMemPool. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate, ::cuDeviceSetMemPool + */ +CUresult CUDAAPI cuDeviceGetMemPool(CUmemoryPool *pool, CUdevice dev); + +/** + * \brief Returns the default mempool of a device + * + * The default mempool of a device contains device memory from that device. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuMemAllocAsync, ::cuMemPoolTrimTo, ::cuMemPoolGetAttribute, ::cuMemPoolSetAttribute, cuMemPoolSetAccess, ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuDeviceGetDefaultMemPool(CUmemoryPool *pool_out, CUdevice dev); + +/** + * \brief Returns information about the execution affinity support of the device. + * + * Returns in \p *pi whether execution affinity type \p type is supported by device \p dev. 
+ * The supported types are: + * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: 1 if context with limited SMs is supported by the device, + * or 0 if not; + * + * \param pi - 1 if the execution affinity type \p type is supported by the device, or 0 if not + * \param type - Execution affinity type to query + * \param dev - Device handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem + */ +CUresult CUDAAPI cuDeviceGetExecAffinitySupport(int *pi, CUexecAffinityType type, CUdevice dev); + +/** + * \brief Blocks until remote writes are visible to the specified scope + * + * Blocks until GPUDirect RDMA writes to the target context via mappings + * created through APIs like nvidia_p2p_get_pages (see + * https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are + * visible to the specified scope. + * + * If the scope equals or lies within the scope indicated by + * ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, the call + * will be a no-op and can be safely omitted for performance. This can be + * determined by comparing the numerical values between the two enums, with + * smaller scopes having smaller values. + * + * Users may query support for this API via + * ::CU_DEVICE_ATTRIBUTE_FLUSH_FLUSH_GPU_DIRECT_RDMA_OPTIONS. + * + * \param target - The target of the operation, see ::CUflushGPUDirectRDMAWritesTarget + * \param scope - The scope of the operation, see ::CUflushGPUDirectRDMAWritesScope + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + */ +CUresult CUDAAPI cuFlushGPUDirectRDMAWrites(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope); + +/** @} */ /* END CUDA_DEVICE */ + +/** + * \defgroup CUDA_DEVICE_DEPRECATED Device Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated device management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the device management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns properties for a selected device + * + * \deprecated + * + * This function was deprecated as of CUDA 5.0 and replaced by ::cuDeviceGetAttribute(). + * + * Returns in \p *prop the properties of device \p dev. 
The ::CUdevprop + * structure is defined as: + * + * \code + typedef struct CUdevprop_st { + int maxThreadsPerBlock; + int maxThreadsDim[3]; + int maxGridSize[3]; + int sharedMemPerBlock; + int totalConstantMemory; + int SIMDWidth; + int memPitch; + int regsPerBlock; + int clockRate; + int textureAlign + } CUdevprop; + * \endcode + * where: + * + * - ::maxThreadsPerBlock is the maximum number of threads per block; + * - ::maxThreadsDim[3] is the maximum sizes of each dimension of a block; + * - ::maxGridSize[3] is the maximum sizes of each dimension of a grid; + * - ::sharedMemPerBlock is the total amount of shared memory available per + * block in bytes; + * - ::totalConstantMemory is the total amount of constant memory available on + * the device in bytes; + * - ::SIMDWidth is the warp size; + * - ::memPitch is the maximum pitch allowed by the memory copy functions that + * involve memory regions allocated through ::cuMemAllocPitch(); + * - ::regsPerBlock is the total number of registers available per block; + * - ::clockRate is the clock frequency in kilohertz; + * - ::textureAlign is the alignment requirement; texture base addresses that + * are aligned to ::textureAlign bytes do not need an offset applied to + * texture fetches. + * + * \param prop - Returned properties of device + * \param dev - Device to get properties for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceGetProperties(CUdevprop *prop, CUdevice dev); + +/** + * \brief Returns the compute capability of the device + * + * \deprecated + * + * This function was deprecated as of CUDA 5.0 and its functionality superseded + * by ::cuDeviceGetAttribute(). + * + * Returns in \p *major and \p *minor the major and minor revision numbers that + * define the compute capability of the device \p dev. + * + * \param major - Major revision number + * \param minor - Minor revision number + * \param dev - Device handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev); + +/** @} */ /* END CUDA_DEVICE_DEPRECATED */ + +/** + * \defgroup CUDA_PRIMARY_CTX Primary Context Management + * + * ___MANBRIEF___ primary context management functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the primary context management functions of the low-level + * CUDA driver application programming interface. + * + * The primary context is unique per device and shared with the CUDA runtime API. + * These functions allow integration with other libraries using CUDA. + * + * @{ + */ + +/** + * \brief Retain the primary context on the GPU + * + * Retains the primary context on the device. 
+ * Once the user successfully retains the primary context, the primary context + * will be active and available to the user until the user releases it + * with ::cuDevicePrimaryCtxRelease() or resets it with ::cuDevicePrimaryCtxReset(). + * Unlike ::cuCtxCreate() the newly retained context is not pushed onto the stack. + * + * Retaining the primary context for the first time will fail with ::CUDA_ERROR_UNKNOWN + * if the compute mode of the device is ::CU_COMPUTEMODE_PROHIBITED. The function + * ::cuDeviceGetAttribute() can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to + * determine the compute mode of the device. + * The nvidia-smi tool can be used to set the compute mode for + * devices. Documentation for nvidia-smi can be obtained by passing a + * -h option to it. + * + * Please note that the primary context always supports pinned allocations. Other + * flags can be specified by ::cuDevicePrimaryCtxSetFlags(). + * + * \param pctx - Returned context handle of the new context + * \param dev - Device for which primary context is requested + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuDevicePrimaryCtxRelease, + * ::cuDevicePrimaryCtxSetFlags, + * ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev); + +/** + * \brief Release the primary context on the GPU + * + * Releases the primary context interop on the device. + * A retained context should always be released once the user is done using + * it. The context is automatically reset once the last reference to it is + * released. This behavior is different when the primary context was retained + * by the CUDA runtime from CUDA 4.0 and earlier. In this case, the primary + * context remains always active. + * + * Releasing a primary context that has not been previously retained will + * fail with ::CUDA_ERROR_INVALID_CONTEXT. + * + * Please note that unlike ::cuCtxDestroy() this method does not pop the context + * from stack in any circumstances. + * + * \param dev - Device which primary context is released + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuDevicePrimaryCtxRetain, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); + +/** + * \brief Set flags for the primary context + * + * Sets the flags for the primary context on the device overwriting perviously + * set ones. + * + * The three LSBs of the \p flags parameter can be used to control how the OS + * thread, which owns the CUDA context at the time of an API call, interacts + * with the OS scheduler when waiting for results from the GPU. Only one of + * the scheduling flags can be set when creating a context. 
+ * + * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + * results from the GPU. This can decrease latency when waiting for the GPU, + * but may lower the performance of CPU threads if they are performing work in + * parallel with the CUDA thread. + * + * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + * results from the GPU. This can increase latency when waiting for the GPU, + * but can increase the performance of CPU threads performing work in parallel + * with the GPU. + * + * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work. + * + * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work.
+ * Deprecated: This flag was deprecated as of CUDA 4.0 and was + * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + * + * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + * uses a heuristic based on the number of active CUDA contexts in the + * process \e C and the number of logical processors in the system \e P. If + * \e C > \e P, then CUDA will yield to other OS threads when waiting for + * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + * for low-powered devices. + * + * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * Deprecated: This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * + * - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + * with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + * be set during context creation to instruct CUDA to create a coredump if + * this context raises an exception during execution. These environment variables + * are described in the CUDA-GDB user guide under the "GPU core dump support" + * section. + * The initial settings will be taken from the global settings at the time of + * context creation. The other settings that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + * been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + * variables, this flag can be set during context creation to instruct CUDA to + * create a coredump if data is written to a certain pipe that is present in the + * OS space. These environment variables are described in the CUDA-GDB user + * guide under the "GPU core dump support" section. + * It is important to note that the pipe name *must* be set with + * ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + * used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + * The initial settings will be taken from the global settings at the time of + * context creation. The other settings that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * \param dev - Device for which the primary context flags are set + * \param flags - New flags for the device + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuDevicePrimaryCtxRetain, + * ::cuDevicePrimaryCtxGetState, + * ::cuCtxCreate, + * ::cuCtxGetFlags, + * ::cuCtxSetFlags, + * ::cudaSetDeviceFlags + */ +CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); + +/** + * \brief Get the state of the primary context + * + * Returns in \p *flags the flags for the primary context of \p dev, and in + * \p *active whether it is active. See ::cuDevicePrimaryCtxSetFlags for flag + * values. + * + * \param dev - Device to get primary context flags for + * \param flags - Pointer to store flags + * \param active - Pointer to store context state; 0 = inactive, 1 = active + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa + * ::cuDevicePrimaryCtxSetFlags, + * ::cuCtxGetFlags, + * ::cuCtxSetFlags, + * ::cudaGetDeviceFlags + */ +CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active); + +/** + * \brief Destroy all allocations and reset all state on the primary context + * + * Explicitly destroys and cleans up all resources associated with the current + * device in the current process. + * + * Note that it is responsibility of the calling function to ensure that no + * other module in the process is using the device any more. For that reason + * it is recommended to use ::cuDevicePrimaryCtxRelease() in most cases. 
+ * However it is safe for other modules to call ::cuDevicePrimaryCtxRelease() + * even after resetting the device. + * Resetting the primary context does not release it, an application that has + * retained the primary context should explicitly release its usage. + * + * \param dev - Device for which primary context is destroyed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE + * \notefnerr + * + * \sa ::cuDevicePrimaryCtxRetain, + * ::cuDevicePrimaryCtxRelease, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaDeviceReset + */ +CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); + +/** @} */ /* END CUDA_PRIMARY_CTX */ + +/** + * \defgroup CUDA_CTX Context Management + * + * ___MANBRIEF___ context management functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the context management functions of the low-level + * CUDA driver application programming interface. + * + * Please note that some functions are described in + * \ref CUDA_PRIMARY_CTX "Primary Context Management" section. + * + * @{ + */ + +/** + * \brief Create a CUDA context + * + * \note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain. + * + * Creates a new CUDA context and associates it with the calling thread. The + * \p flags parameter is described below. The context is created with a usage + * count of 1 and the caller of ::cuCtxCreate() must call ::cuCtxDestroy() + * when done using the context. If a context is already current to the thread, + * it is supplanted by the newly created context and may be restored by a subsequent + * call to ::cuCtxPopCurrent(). + * + * The three LSBs of the \p flags parameter can be used to control how the OS + * thread, which owns the CUDA context at the time of an API call, interacts + * with the OS scheduler when waiting for results from the GPU. Only one of + * the scheduling flags can be set when creating a context. + * + * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + * results from the GPU. This can decrease latency when waiting for the GPU, + * but may lower the performance of CPU threads if they are performing work in + * parallel with the CUDA thread. + * + * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + * results from the GPU. This can increase latency when waiting for the GPU, + * but can increase the performance of CPU threads performing work in parallel + * with the GPU. + * + * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work. + * + * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work.
+ * Deprecated: This flag was deprecated as of CUDA 4.0 and was + * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + * + * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + * uses a heuristic based on the number of active CUDA contexts in the + * process \e C and the number of logical processors in the system \e P. If + * \e C > \e P, then CUDA will yield to other OS threads when waiting for + * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + * for low-powered devices. + * + * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. + * This flag must be set in order to allocate pinned host memory that is + * accessible to the GPU. + * + * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * Deprecated: This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). + * + * - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + * with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + * be set during context creation to instruct CUDA to create a coredump if + * this context raises an exception during execution. These environment variables + * are described in the CUDA-GDB user guide under the "GPU core dump support" + * section. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + * been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + * variables, this flag can be set during context creation to instruct CUDA to + * create a coredump if data is written to a certain pipe that is present in the + * OS space. These environment variables are described in the CUDA-GDB user + * guide under the "GPU core dump support" section. + * It is important to note that the pipe name *must* be set with + * ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + * used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * Setting this flag on any context creation is equivalent to setting the + * ::CU_COREDUMP_ENABLE_USER_TRIGGER attribute to \p true globally. + * + * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of + * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() + * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the + * compute mode of the device. The nvidia-smi tool can be used to set + * the compute mode for * devices. + * Documentation for nvidia-smi can be obtained by passing a + * -h option to it. + * + * \param pctx - Returned context handle of the new context + * \param flags - Context creation flags + * \param dev - Device to create context on + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCoredumpSetAttributeGlobal, + * ::cuCoredumpSetAttribute, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); + +/** + * \brief Create a CUDA context with execution affinity + * + * Creates a new CUDA context with execution affinity and associates it with + * the calling thread. The \p paramsArray and \p flags parameter are described below. 
+ * The context is created with a usage count of 1 and the caller of ::cuCtxCreate() must + * call ::cuCtxDestroy() when done using the context. If a context is already + * current to the thread, it is supplanted by the newly created context and may + * be restored by a subsequent call to ::cuCtxPopCurrent(). + * + * The type and the amount of execution resource the context can use is limited by \p paramsArray + * and \p numParams. The \p paramsArray is an array of \p CUexecAffinityParam and the \p numParams + * describes the size of the array. If two \p CUexecAffinityParam in the array have the same type, + * the latter execution affinity parameter overrides the former execution affinity parameter. + * The supported execution affinity types are: + * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion + * of SMs is specified as the number of SMs via \p CUexecAffinitySmCount. This limit will be internally + * rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution + * affinity of the context via \p cuCtxGetExecAffinity after context creation. Currently, this attribute + * is only supported under Volta+ MPS. + * + * The three LSBs of the \p flags parameter can be used to control how the OS + * thread, which owns the CUDA context at the time of an API call, interacts + * with the OS scheduler when waiting for results from the GPU. Only one of + * the scheduling flags can be set when creating a context. + * + * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + * results from the GPU. This can decrease latency when waiting for the GPU, + * but may lower the performance of CPU threads if they are performing work in + * parallel with the CUDA thread. + * + * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + * results from the GPU. This can increase latency when waiting for the GPU, + * but can increase the performance of CPU threads performing work in parallel + * with the GPU. + * + * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work. + * + * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work.
+ * Deprecated: This flag was deprecated as of CUDA 4.0 and was + * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + * + * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + * uses a heuristic based on the number of active CUDA contexts in the + * process \e C and the number of logical processors in the system \e P. If + * \e C > \e P, then CUDA will yield to other OS threads when waiting for + * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + * for low-powered devices. + * + * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. + * This flag must be set in order to allocate pinned host memory that is + * accessible to the GPU. + * + * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * Deprecated: This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). + * + * - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + * with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + * be set during context creation to instruct CUDA to create a coredump if + * this context raises an exception during execution. These environment variables + * are described in the CUDA-GDB user guide under the "GPU core dump support" + * section. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + * been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + * variables, this flag can be set during context creation to instruct CUDA to + * create a coredump if data is written to a certain pipe that is present in the + * OS space. These environment variables are described in the CUDA-GDB user + * guide under the "GPU core dump support" section. + * It is important to note that the pipe name *must* be set with + * ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + * used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * Setting this flag on any context creation is equivalent to setting the + * ::CU_COREDUMP_ENABLE_USER_TRIGGER attribute to \p true globally. + * + * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of + * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() + * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the + * compute mode of the device. The nvidia-smi tool can be used to set + * the compute mode for * devices. + * Documentation for nvidia-smi can be obtained by passing a + * -h option to it. 
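+ *
+ * As a rough, non-normative sketch (error checking omitted), requesting an
+ * SM-count affinity and then querying the value actually granted might look
+ * like the following, where \p dev is assumed to be a valid ::CUdevice and
+ * the count of 4 is an arbitrary example value:
+ * \code
+   CUexecAffinityParam affinity;
+   affinity.type = CU_EXEC_AFFINITY_TYPE_SM_COUNT;
+   affinity.param.smCount.val = 4;          // example request; may be rounded up
+
+   CUcontext ctx;
+   cuCtxCreate_v3(&ctx, &affinity, 1, CU_CTX_SCHED_AUTO, dev);
+
+   CUexecAffinityParam granted;             // query the affinity actually in effect
+   cuCtxGetExecAffinity(&granted, CU_EXEC_AFFINITY_TYPE_SM_COUNT);
+
+   cuCtxDestroy(ctx);
+ * \endcode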
+ * + * \param pctx - Returned context handle of the new context + * \param paramsArray - Execution affinity parameters + * \param numParams - Number of execution affinity parameters + * \param flags - Context creation flags + * \param dev - Device to create context on + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuCoredumpSetAttributeGlobal, + * ::cuCoredumpSetAttribute, + * ::CUexecAffinityParam + */ +CUresult CUDAAPI cuCtxCreate_v3(CUcontext *pctx, CUexecAffinityParam *paramsArray, int numParams, unsigned int flags, CUdevice dev); + +/** + * \brief Destroy a CUDA context + * + * Destroys the CUDA context specified by \p ctx. The context \p ctx will be + * destroyed regardless of how many threads it is current to. + * It is the responsibility of the calling function to ensure that no API + * call issues using \p ctx while ::cuCtxDestroy() is executing. + * + * Destroys and cleans up all resources associated with the context. + * It is the caller's responsibility to ensure that the context or its resources + * are not accessed or passed in subsequent API calls and doing so will result in undefined behavior. + * These resources include CUDA types such as ::CUmodule, ::CUfunction, ::CUstream, ::CUevent, + * ::CUarray, ::CUmipmappedArray, ::CUtexObject, ::CUsurfObject, ::CUtexref, ::CUsurfref, + * ::CUgraphicsResource, ::CUlinkState, ::CUexternalMemory and ::CUexternalSemaphore. + * + * If \p ctx is current to the calling thread then \p ctx will also be + * popped from the current thread's context stack (as though ::cuCtxPopCurrent() + * were called). If \p ctx is current to other threads, then \p ctx will + * remain current to those threads, and attempting to access \p ctx from + * those threads will result in the error ::CUDA_ERROR_CONTEXT_IS_DESTROYED. + * + * \param ctx - Context to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); + +/** + * \brief Pushes a context on the current CPU thread + * + * Pushes the given context \p ctx onto the CPU thread's stack of current + * contexts. The specified context becomes the CPU thread's current context, so + * all CUDA functions that operate on the current context are affected. + * + * The previous current context may be made current again by calling + * ::cuCtxDestroy() or ::cuCtxPopCurrent(). 
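+ *
+ * As an informal sketch only (error checking omitted, \e dev assumed to be a
+ * valid ::CUdevice), a context can be detached from one thread and later made
+ * current again with push/pop:
+ * \code
+   CUcontext ctx, popped;
+   cuCtxCreate(&ctx, 0, dev);        // ctx is created and becomes current
+   cuCtxPopCurrent(&popped);         // detach; this thread now has no current context
+
+   // ... later, possibly on a different CPU thread ...
+   cuCtxPushCurrent(ctx);            // make ctx current to this thread
+   // issue work against ctx here
+   cuCtxPopCurrent(&popped);
+   cuCtxDestroy(ctx);
+ * \endcode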
+ * + * \param ctx - Context to push + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); + +/** + * \brief Pops the current CUDA context from the current CPU thread. + * + * Pops the current CUDA context from the CPU thread and passes back the + * old context handle in \p *pctx. That context may then be made current + * to a different CPU thread by calling ::cuCtxPushCurrent(). + * + * If a context was current to the CPU thread before ::cuCtxCreate() or + * ::cuCtxPushCurrent() was called, this function makes that context current to + * the CPU thread again. + * + * \param pctx - Returned popped context handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); + +/** + * \brief Binds the specified CUDA context to the calling CPU thread + * + * Binds the specified CUDA context to the calling CPU thread. + * If \p ctx is NULL then the CUDA context previously bound to the + * calling CPU thread is unbound and ::CUDA_SUCCESS is returned. + * + * If there exists a CUDA context stack on the calling CPU thread, this + * will replace the top of that stack with \p ctx. + * If \p ctx is NULL then this will be equivalent to popping the top + * of the calling CPU thread's CUDA context stack (or a no-op if the + * calling CPU thread's CUDA context stack is empty). + * + * \param ctx - Context to bind to the calling CPU thread + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa + * ::cuCtxGetCurrent, + * ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cudaSetDevice + */ +CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx); + +/** + * \brief Returns the CUDA context bound to the calling CPU thread. + * + * Returns in \p *pctx the CUDA context bound to the calling CPU thread. + * If no context is bound to the calling CPU thread then \p *pctx is + * set to NULL and ::CUDA_SUCCESS is returned. + * + * \param pctx - Returned context handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * \notefnerr + * + * \sa + * ::cuCtxSetCurrent, + * ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cudaGetDevice + */ +CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx); + +/** + * \brief Returns the device ID for the current context + * + * Returns in \p *device the ordinal of the current context's device. 
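+ *
+ * A minimal illustrative sketch (error checking omitted, buffer size arbitrary):
+ * \code
+   CUdevice currentDev;
+   cuCtxGetDevice(&currentDev);                       // device of the current context
+
+   char name[256];
+   cuDeviceGetName(name, (int)sizeof(name), currentDev);
+ * \endcode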
+ * + * \param device - Returned device ID for the current context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaGetDevice + */ +CUresult CUDAAPI cuCtxGetDevice(CUdevice *device); + +/** + * \brief Returns the flags for the current context + * + * Returns in \p *flags the flags of the current context. See ::cuCtxCreate + * for flag values. + * + * \param flags - Pointer to store flags of current context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetCurrent, + * ::cuCtxGetDevice, + * ::cuCtxGetLimit, + * ::cuCtxGetSharedMemConfig, + * ::cuCtxGetStreamPriorityRange, + * ::cuCtxSetFlags, + * ::cudaGetDeviceFlags + */ +CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags); + +/** + * \brief Sets the flags for the current context + * + * \param flags - Flags to set on the current context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetCurrent, + * ::cuCtxGetDevice, + * ::cuCtxGetLimit, + * ::cuCtxGetSharedMemConfig, + * ::cuCtxGetStreamPriorityRange, + * ::cuCtxGetFlags, + * ::cudaGetDeviceFlags, + * ::cuDevicePrimaryCtxSetFlags, + */ +CUresult CUDAAPI cuCtxSetFlags(unsigned int flags); + +/** + * \brief Returns the unique Id associated with the context supplied + * + * Returns in \p ctxId the unique Id which is associated with a given context. + * The Id is unique for the life of the program for this instance of CUDA. + * If context is supplied as NULL and there is one current, the Id of the + * current context is returned. + * + * \param ctx - Context for which to obtain the Id + * \param ctxId - Pointer to store the Id of the context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPushCurrent + */ +CUresult CUDAAPI cuCtxGetId(CUcontext ctx, unsigned long long *ctxId); + +/** + * \brief Block for a context's tasks to complete + * + * Blocks until the device has completed all preceding requested tasks. + * ::cuCtxSynchronize() returns an error if one of the preceding tasks failed. + * If the context was created with the ::CU_CTX_SCHED_BLOCKING_SYNC flag, the + * CPU thread will block until the GPU context has finished its work. 
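+ *
+ * A minimal illustrative sketch of checking for failures in previously queued
+ * work (the launches themselves are assumed to have happened elsewhere):
+ * \code
+   CUresult status = cuCtxSynchronize();   // wait for all preceding tasks
+   if (status != CUDA_SUCCESS) {
+       // one of the preceding asynchronous tasks failed
+   }
+ * \endcode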
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cudaDeviceSynchronize + */ +CUresult CUDAAPI cuCtxSynchronize(void); + +/** + * \brief Set resource limits + * + * Setting \p limit to \p value is a request by the application to update + * the current limit maintained by the context. The driver is free to + * modify the requested value to meet h/w requirements (this could be + * clamping to minimum or maximum values, rounding up to nearest element + * size, etc). The application can use ::cuCtxGetLimit() to find out exactly + * what the limit has been set to. + * + * Setting each ::CUlimit has its own specific restrictions, so each is + * discussed here. + * + * - ::CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread. + * The driver automatically increases the per-thread stack size + * for each kernel launch as needed. This size isn't reset back to the + * original value after each launch. Setting this value will take effect + * immediately, and if necessary, the device will block until all preceding + * requested tasks are complete. + * + * - ::CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used + * by the ::printf() device system call. Setting ::CU_LIMIT_PRINTF_FIFO_SIZE + * must be performed before launching any kernel that uses the ::printf() + * device system call, otherwise ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * - ::CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used + * by the ::malloc() and ::free() device system calls. Setting + * ::CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel + * that uses the ::malloc() or ::free() device system calls, otherwise + * ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of + * a grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting + * this limit must be performed before any launch of a kernel that uses the + * device runtime and calls ::cudaDeviceSynchronize() above the default sync + * depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail + * with error code ::cudaErrorSyncDepthExceeded if the limitation is + * violated. This limit can be set smaller than the default or up the maximum + * launch depth of 24. When setting this limit, keep in mind that additional + * levels of sync depth require the driver to reserve large amounts of device + * memory which can no longer be used for user allocations. If these + * reservations of device memory fail, ::cuCtxSetLimit() will return + * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability < 9.0. + * Attempting to set this limit on devices of other compute capability + * versions will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being + * returned. + * + * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of + * outstanding device runtime launches that can be made from the current + * context. A grid is outstanding from the point of launch up until the grid + * is known to have been completed. 
Device runtime launches which violate + * this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when + * ::cudaGetLastError() is called after launch. If more pending launches than + * the default (2048 launches) are needed for a module using the device + * runtime, this limit can be increased. Keep in mind that being able to + * sustain additional pending launches will require the driver to reserve + * larger amounts of device memory upfront which can no longer be used for + * allocations. If these reservations fail, ::cuCtxSetLimit() will return + * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability 3.5 and + * higher. Attempting to set this limit on devices of compute capability less + * than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being + * returned. + * + * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity. + * Values can range from 0B to 128B. This is purely a performance hint and + * it can be ignored or clamped depending on the platform. + * + * - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE controls size in bytes available for + * persisting L2 cache. This is purely a performance hint and it can be + * ignored or clamped depending on the platform. + * + * \param limit - Limit to set + * \param value - Size of limit + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNSUPPORTED_LIMIT, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSynchronize, + * ::cudaDeviceSetLimit + */ +CUresult CUDAAPI cuCtxSetLimit(CUlimit limit, size_t value); + +/** + * \brief Returns resource limits + * + * Returns in \p *pvalue the current size of \p limit. The supported + * ::CUlimit values are: + * - ::CU_LIMIT_STACK_SIZE: stack size in bytes of each GPU thread. + * - ::CU_LIMIT_PRINTF_FIFO_SIZE: size in bytes of the FIFO used by the + * ::printf() device system call. + * - ::CU_LIMIT_MALLOC_HEAP_SIZE: size in bytes of the heap used by the + * ::malloc() and ::free() device system calls. + * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: maximum grid depth at which a thread + * can issue the device runtime call ::cudaDeviceSynchronize() to wait on + * child grid launches to complete. + * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: maximum number of outstanding + * device runtime launches that can be made from this context. + * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY: L2 cache fetch granularity. + * - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE: Persisting L2 cache size in bytes + * + * \param limit - Limit to query + * \param pvalue - Returned size of limit + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNSUPPORTED_LIMIT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaDeviceGetLimit + */ +CUresult CUDAAPI cuCtxGetLimit(size_t *pvalue, CUlimit limit); + +/** + * \brief Returns the preferred cache configuration for the current context. 
+ * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this function returns through \p pconfig the preferred cache configuration + * for the current context. This is only a preference. The driver will use + * the requested configuration if possible, but it is free to choose a different + * configuration if required to execute functions. + * + * This will return a \p pconfig of ::CU_FUNC_CACHE_PREFER_NONE on devices + * where the size of the L1 cache and shared memory are fixed. + * + * The supported cache configurations are: + * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + * + * \param pconfig - Returned cache configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceGetCacheConfig + */ +CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig); + +/** + * \brief Sets the preferred cache configuration for the current context. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p config the preferred cache configuration for + * the current context. This is only a preference. The driver will use + * the requested configuration if possible, but it is free to choose a different + * configuration if required to execute the function. Any function preference + * set via ::cuFuncSetCacheConfig() or ::cuKernelSetCacheConfig() will be preferred over this context-wide + * setting. Setting the context-wide cache configuration to + * ::CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer + * to not change the cache configuration unless required to launch the kernel. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
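+ *
+ * As a brief illustrative sketch (error checking omitted), a context-wide
+ * preference for a larger L1 cache can be requested with one of the
+ * configurations listed below and later reverted to the default:
+ * \code
+   cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_L1);     // subsequent launches prefer L1
+   // ... kernel launches ...
+   cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_NONE);   // restore the default preference
+ * \endcode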
+ * + * The supported cache configurations are: + * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + * + * \param config - Requested cache configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceSetCacheConfig, + * ::cuKernelSetCacheConfig + */ +CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config); + +/** + * \brief Returns the current shared memory configuration for the current context. + * + * This function will return in \p pConfig the current size of shared memory banks + * in the current context. On devices with configurable shared memory banks, + * ::cuCtxSetSharedMemConfig can be used to change this setting, so that all + * subsequent kernel launches will by default use the new bank size. When + * ::cuCtxGetSharedMemConfig is called on devices without configurable shared + * memory, it will return the fixed bank size of the hardware. + * + * The returned bank configurations can be either: + * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: shared memory bank width is + * four bytes. + * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: shared memory bank width will + * eight bytes. + * + * \param pConfig - returned shared memory configuration + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuCtxGetSharedMemConfig, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceGetSharedMemConfig + */ +CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig); + +/** + * \brief Sets the shared memory configuration for the current context. + * + * On devices with configurable shared memory banks, this function will set + * the context's shared memory bank size which is used for subsequent kernel + * launches. + * + * Changed the shared memory configuration between launches may insert a device + * side synchronization point between those launches. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * The supported bank configurations are: + * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: set bank width to the default initial + * setting (currently, four bytes). 
+ * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to + * be natively four bytes. + * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to + * be natively eight bytes. + * + * \param config - requested shared memory configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuCtxGetSharedMemConfig, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceSetSharedMemConfig + */ +CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config); + +/** + * \brief Gets the context's API version. + * + * Returns a version number in \p version corresponding to the capabilities of + * the context (e.g. 3010 or 3020), which library developers can use to direct + * callers to a specific API version. If \p ctx is NULL, returns the API version + * used to create the currently bound context. + * + * Note that new API versions are only introduced when context capabilities are + * changed that break binary compatibility, so the API version and driver version + * may be different. For example, it is valid for the API version to be 3020 while + * the driver version is 4020. + * + * \param ctx - Context to check + * \param version - Pointer to version + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version); + +/** + * \brief Returns numerical values that correspond to the least and + * greatest stream priorities. + * + * Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond + * to the least and greatest stream priorities respectively. Stream priorities + * follow a convention where lower numbers imply greater priorities. The range of + * meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority]. + * If the user attempts to create a stream with a priority value that is + * outside the meaningful range as specified by this API, the priority is + * automatically clamped down or up to either \p *leastPriority or \p *greatestPriority + * respectively. See ::cuStreamCreateWithPriority for details on creating a + * priority stream. + * A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value + * is not desired. + * + * This function will return '0' in both \p *leastPriority and \p *greatestPriority if + * the current context's device does not support stream priorities + * (see ::cuDeviceGetAttribute). 
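+ *
+ * A minimal illustrative sketch (error checking omitted) of creating a stream
+ * at the greatest meaningful priority returned by this function:
+ * \code
+   int leastPriority, greatestPriority;
+   cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority);
+
+   CUstream stream;
+   cuStreamCreateWithPriority(&stream, CU_STREAM_NON_BLOCKING, greatestPriority);
+   // ... use the stream ...
+   cuStreamDestroy(stream);
+ * \endcode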
+ * + * \param leastPriority - Pointer to an int in which the numerical value for least + * stream priority is returned + * \param greatestPriority - Pointer to an int in which the numerical value for greatest + * stream priority is returned + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuStreamCreateWithPriority, + * ::cuStreamGetPriority, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaDeviceGetStreamPriorityRange + */ +CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority); + +/** + * \brief Resets all persisting lines in cache to normal status. + * + * ::cuCtxResetPersistingL2Cache Resets all persisting lines in cache to normal + * status. Takes effect on function return. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuCtxResetPersistingL2Cache(void); + +/** + * \brief Returns the execution affinity setting for the current context. + * + * Returns in \p *pExecAffinity the current value of \p type. The supported + * ::CUexecAffinityType values are: + * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: number of SMs the context is limited to use. + * + * \param type - Execution affinity type to query + * \param pExecAffinity - Returned execution affinity + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY + * \notefnerr + * + * \sa + * ::CUexecAffinityParam + */ +CUresult CUDAAPI cuCtxGetExecAffinity(CUexecAffinityParam *pExecAffinity, CUexecAffinityType type); + + +/** @} */ /* END CUDA_CTX */ + +/** + * \defgroup CUDA_CTX_DEPRECATED Context Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated context management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated context management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Increment a context's usage-count + * + * \deprecated + * + * Note that this function is deprecated and should not be used. + * + * Increments the usage count of the context and passes back a context handle + * in \p *pctx that must be passed to ::cuCtxDetach() when the application is + * done with the context. ::cuCtxAttach() fails if there is no context current + * to the thread. + * + * Currently, the \p flags parameter must be 0. + * + * \param pctx - Returned context handle of the current context + * \param flags - Context attach flags (must be 0) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxDetach, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuCtxAttach(CUcontext *pctx, unsigned int flags); + +/** + * \brief Decrement a context's usage-count + * + * \deprecated + * + * Note that this function is deprecated and should not be used. 
+ * + * Decrements the usage count of the context \p ctx, and destroys the context + * if the usage count goes to 0. The context must be a handle that was passed + * back by ::cuCtxCreate() or ::cuCtxAttach(), and must be current to the + * calling thread. + * + * \param ctx - Context to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuCtxDetach(CUcontext ctx); + +/** @} */ /* END CUDA_CTX_DEPRECATED */ + + +/** + * \defgroup CUDA_MODULE Module Management + * + * ___MANBRIEF___ module management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the module management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Loads a compute module + * + * Takes a filename \p fname and loads the corresponding module \p module into + * the current context. The CUDA driver API does not attempt to lazily + * allocate the resources needed by a module; if the memory for functions and + * data (constant and global) needed by the module cannot be allocated, + * ::cuModuleLoad() fails. The file should be a \e cubin file as output by + * \b nvcc, or a \e PTX file either as output by \b nvcc or handwritten, or + * a \e fatbin file as output by \b nvcc from toolchain 4.0 or later. + * + * \param module - Returned module + * \param fname - Filename of module to load + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_FILE_NOT_FOUND, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname); + +/** + * \brief Load a module's data + * + * Takes a pointer \p image and loads the corresponding module \p module into + * the current context. The pointer may be obtained by mapping a \e cubin or + * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file + * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin + * object into the executable resources and using operating system calls such + * as Windows \c FindResource() to obtain the pointer. 
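+ *
+ * A minimal illustrative sketch (error checking omitted); \e ptxSource is
+ * assumed to be a NUL-terminated PTX string obtained elsewhere and
+ * \c "myKernel" is a placeholder entry-point name:
+ * \code
+   CUmodule module;
+   cuModuleLoadData(&module, ptxSource);
+
+   CUfunction kernel;
+   cuModuleGetFunction(&kernel, module, "myKernel");
+   // ... launch via cuLaunchKernel ...
+   cuModuleUnload(module);
+ * \endcode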
+ * + * \param module - Returned module + * \param image - Module data to load + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void *image); + +/** + * \brief Load a module's data with options + * + * Takes a pointer \p image and loads the corresponding module \p module into + * the current context. The pointer may be obtained by mapping a \e cubin or + * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file + * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin + * object into the executable resources and using operating system calls such + * as Windows \c FindResource() to obtain the pointer. Options are passed as + * an array via \p options and any corresponding parameters are passed in + * \p optionValues. The number of total options is supplied via \p numOptions. + * Any outputs will be returned via \p optionValues. + * + * \param module - Returned module + * \param image - Module data to load + * \param numOptions - Number of options + * \param options - Options for JIT + * \param optionValues - Option values for JIT + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); + +/** + * \brief Load a module's data + * + * Takes a pointer \p fatCubin and loads the corresponding module \p module + * into the current context. The pointer represents a fat binary object, + * which is a collection of different \e cubin and/or \e PTX files, all + * representing the same device code, but compiled and optimized for different + * architectures. + * + * Prior to CUDA 4.0, there was no documented API for constructing and using + * fat binary objects by programmers. Starting with CUDA 4.0, fat binary + * objects can be constructed by providing the -fatbin option to \b nvcc. + * More information can be found in the \b nvcc document. 
+ * + * \param module - Returned module + * \param fatCubin - Fat binary to load + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoadFatBinary(CUmodule *module, const void *fatCubin); + +/** + * \brief Unloads a module + * + * Unloads a module \p hmod from the current context. + * + * \param hmod - Module to unload + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_destroy_ub + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary + */ +CUresult CUDAAPI cuModuleUnload(CUmodule hmod); + +/** + * CUDA Lazy Loading status + */ +typedef enum CUmoduleLoadingMode_enum { + CU_MODULE_EAGER_LOADING = 0x1, /**< Lazy Kernel Loading is not enabled */ + CU_MODULE_LAZY_LOADING = 0x2, /**< Lazy Kernel Loading is enabled */ +} CUmoduleLoadingMode; + +/** + * \brief Query lazy loading mode + * + * Returns lazy loading mode + * Module loading mode is controlled by CUDA_MODULE_LOADING env variable + * + * \param mode - Returns the lazy loading mode + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa + * ::cuModuleLoad, + */ +CUresult CUDAAPI cuModuleGetLoadingMode(CUmoduleLoadingMode *mode); + +/** + * \brief Returns a function handle + * + * Returns in \p *hfunc the handle of the function of name \p name located in + * module \p hmod. If no function of that name exists, ::cuModuleGetFunction() + * returns ::CUDA_ERROR_NOT_FOUND. + * + * \param hfunc - Returned function handle + * \param hmod - Module to retrieve function from + * \param name - Name of function to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name); + +/** + * \brief Returns a global pointer from a module + * + * Returns in \p *dptr and \p *bytes the base pointer and size of the + * global of name \p name located in module \p hmod. If no variable of that name + * exists, ::cuModuleGetGlobal() returns ::CUDA_ERROR_NOT_FOUND. + * One of the parameters \p dptr or \p bytes (not both) can be NULL in which + * case it is ignored. 
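+ *
+ * A minimal illustrative sketch (error checking omitted); \c "deviceCounter"
+ * is a placeholder for an \c int device variable assumed to be defined in
+ * \e module:
+ * \code
+   CUdeviceptr dptr;
+   size_t bytes;
+   cuModuleGetGlobal(&dptr, &bytes, module, "deviceCounter");
+
+   int zero = 0;                             // e.g. reset the variable from the host
+   cuMemcpyHtoD(dptr, &zero, sizeof(zero));
+ * \endcode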
+ * + * \param dptr - Returned global device pointer + * \param bytes - Returned global size in bytes + * \param hmod - Module to retrieve global from + * \param name - Name of global to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload, + * ::cudaGetSymbolAddress, + * ::cudaGetSymbolSize + */ +CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name); + +/** + * \brief Creates a pending JIT linker invocation. + * + * If the call is successful, the caller owns the returned CUlinkState, which + * should eventually be destroyed with ::cuLinkDestroy. The + * device code machine size (32 or 64 bit) will match the calling application. + * + * Both linker and compiler options may be specified. Compiler options will + * be applied to inputs to this linker action which must be compiled from PTX. + * The options ::CU_JIT_WALL_TIME, + * ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES + * will accumulate data until the CUlinkState is destroyed. + * + * \p optionValues must remain valid for the life of the CUlinkState if output + * options are used. No other references to inputs are maintained after this + * call returns. + * + * \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + * + * \param numOptions Size of options arrays + * \param options Array of linker and compiler options + * \param optionValues Array of option values, each cast to void * + * \param stateOut On success, this will contain a CUlinkState to specify + * and complete this action + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuLinkAddData, + * ::cuLinkAddFile, + * ::cuLinkComplete, + * ::cuLinkDestroy + */ +CUresult CUDAAPI +cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); + +/** + * \brief Add an input to a pending linker invocation + * + * Ownership of \p data is retained by the caller. No reference is retained to any + * inputs after this call returns. + * + * This method accepts only compiler options, which are used if the data must + * be compiled from PTX, and does not accept any of + * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, + * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. + * + * \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + * + * \param state A pending linker action. + * \param type The type of the input data. + * \param data The input data. PTX must be NULL-terminated. + * \param size The length of the input data. + * \param name An optional name for this input in log messages. + * \param numOptions Size of options. + * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate). + * \param optionValues Array of option values, each cast to void *. 
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU + * + * \sa ::cuLinkCreate, + * ::cuLinkAddFile, + * ::cuLinkComplete, + * ::cuLinkDestroy + */ +CUresult CUDAAPI +cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, + unsigned int numOptions, CUjit_option *options, void **optionValues); + +/** + * \brief Add a file input to a pending linker invocation + * + * No reference is retained to any inputs after this call returns. + * + * This method accepts only compiler options, which are used if the input + * must be compiled from PTX, and does not accept any of + * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, + * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. + * + * This method is equivalent to invoking ::cuLinkAddData on the contents + * of the file. + * + * \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + * + * \param state A pending linker action + * \param type The type of the input data + * \param path Path to the input file + * \param numOptions Size of options + * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate) + * \param optionValues Array of option values, each cast to void * + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_FILE_NOT_FOUND + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU + * + * \sa ::cuLinkCreate, + * ::cuLinkAddData, + * ::cuLinkComplete, + * ::cuLinkDestroy + */ +CUresult CUDAAPI +cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path, + unsigned int numOptions, CUjit_option *options, void **optionValues); + +/** + * \brief Complete a pending linker invocation + * + * Completes the pending linker action and returns the cubin image for the linked + * device code, which can be used with ::cuModuleLoadData. The cubin is owned by + * \p state, so it should be loaded before \p state is destroyed via ::cuLinkDestroy. + * This call does not destroy \p state. + * + * \param state A pending linker invocation + * \param cubinOut On success, this will point to the output image + * \param sizeOut Optional parameter to receive the size of the generated image + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuLinkCreate, + * ::cuLinkAddData, + * ::cuLinkAddFile, + * ::cuLinkDestroy, + * ::cuModuleLoadData + */ +CUresult CUDAAPI +cuLinkComplete(CUlinkState state, void **cubinOut, size_t *sizeOut); + +/** + * \brief Destroys state for a JIT linker invocation. 
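+ *
+ * The complete linker flow described in this section can be sketched as
+ * follows, assuming a context is current and \c ptxSrc holds a
+ * NULL-terminated PTX string; error checking is omitted for brevity:
+ * \code
+   CUlinkState link;
+   void *cubin; size_t cubinSize;
+   CUmodule mod;
+   cuLinkCreate(0, NULL, NULL, &link);
+   cuLinkAddData(link, CU_JIT_INPUT_PTX, (void *)ptxSrc, strlen(ptxSrc) + 1,
+                 "ptx_input", 0, NULL, NULL);
+   cuLinkComplete(link, &cubin, &cubinSize);    // cubin is owned by 'link'
+   cuModuleLoadData(&mod, cubin);               // load before destroying 'link'
+   cuLinkDestroy(link);
+ * \endcode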
+ * + * \param state State object for the linker invocation + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE + * + * \sa ::cuLinkCreate + */ +CUresult CUDAAPI +cuLinkDestroy(CUlinkState state); + +/** @} */ /* END CUDA_MODULE */ + +/** + * \defgroup CUDA_MODULE_DEPRECATED Module Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated module management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated module management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns a handle to a texture reference + * + * \deprecated + * + * Returns in \p *pTexRef the handle of the texture reference of name \p name + * in the module \p hmod. If no texture reference of that name exists, + * ::cuModuleGetTexRef() returns ::CUDA_ERROR_NOT_FOUND. This texture reference + * handle should not be destroyed, since it will be destroyed when the module + * is unloaded. + * + * \param pTexRef - Returned texture reference + * \param hmod - Module to retrieve texture reference from + * \param name - Name of texture reference to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa + * ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetSurfRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name); + +/** + * \brief Returns a handle to a surface reference + * + * \deprecated + * + * Returns in \p *pSurfRef the handle of the surface reference of name \p name + * in the module \p hmod. If no surface reference of that name exists, + * ::cuModuleGetSurfRef() returns ::CUDA_ERROR_NOT_FOUND. + * + * \param pSurfRef - Returned surface reference + * \param hmod - Module to retrieve surface reference from + * \param name - Name of surface reference to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa + * ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuModuleGetSurfRef(CUsurfref *pSurfRef, CUmodule hmod, const char *name); + +/** @} */ /* END CUDA_MODULE_DEPRECATED */ + +/** + * \defgroup CUDA_LIBRARY Library Management + * + * ___MANBRIEF___ library management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the library management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Load a library with specified code and options + * + * Takes a pointer \p code and loads the corresponding library \p library into + * all contexts existent at the time of the call and future contexts at the time + * of creation until the library is unloaded with ::cuLibraryUnload(). 
+ * + * The pointer may be obtained by mapping a \e cubin or \e PTX or \e fatbin file, + * passing a \e cubin or \e PTX or \e fatbin file as a NULL-terminated text string, or + * incorporating a \e cubin or \e fatbin object into the executable resources and + * using operating system calls such as Windows \c FindResource() to obtain the pointer. + * + * Options are passed as an array via \p jitOptions and any corresponding parameters are passed in + * \p jitOptionsValues. The number of total JIT options is supplied via \p numJitOptions. + * Any outputs will be returned via \p jitOptionsValues. + * + * Library load options are passed as an array via \p libraryOptions and any corresponding parameters are passed in + * \p libraryOptionValues. The number of total library load options is supplied via \p numLibraryOptions. + * + * \param library - Returned library + * \param code - Code to load + * \param jitOptions - Options for JIT + * \param jitOptionsValues - Option values for JIT + * \param numJitOptions - Number of options + * \param libraryOptions - Options for loading + * \param libraryOptionValues - Option values for loading + * \param numLibraryOptions - Number of options for loading + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * + * \sa ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx + */ +CUresult CUDAAPI cuLibraryLoadData(CUlibrary *library, const void *code, + CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, + CUlibraryOption *libraryOptions, void** libraryOptionValues, unsigned int numLibraryOptions); + +/** + * \brief Load a library with specified file and options + * + * Takes a filename \p fileName and loads the corresponding library \p library into + * all contexts existent at the time of the call and future contexts at the time of + * creation until the library is unloaded with ::cuLibraryUnload(). + * + * The file should be a \e cubin file as output by \b nvcc, or a \e PTX file either + * as output by \b nvcc or handwritten, or a \e fatbin file as output by \b nvcc + * from toolchain 4.0 or later. + * + * Options are passed as an array via \p jitOptions and any corresponding parameters are + * passed in \p jitOptionsValues. The number of total options is supplied via \p numJitOptions. + * Any outputs will be returned via \p jitOptionsValues. + * + * Library load options are passed as an array via \p libraryOptions and any corresponding parameters are passed in + * \p libraryOptionValues. The number of total library load options is supplied via \p numLibraryOptions. 
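+ *
+ * As a minimal sketch, assuming a fatbin produced by \b nvcc exists at the
+ * hypothetical path \c kernels.fatbin and no JIT or library load options are
+ * needed:
+ * \code
+   CUlibrary lib;
+   CUresult rc = cuLibraryLoadFromFile(&lib, "kernels.fatbin",
+                                       NULL, NULL, 0,    // no JIT options
+                                       NULL, NULL, 0);   // no library options
+   if (rc == CUDA_SUCCESS) {
+       // ... use the library ...
+       cuLibraryUnload(lib);
+   }
+ * \endcode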
+ * + * \param library - Returned library + * \param fileName - File to load from + * \param jitOptions - Options for JIT + * \param jitOptionsValues - Option values for JIT + * \param numJitOptions - Number of options + * \param libraryOptions - Options for loading + * \param libraryOptionValues - Option values for loading + * \param numLibraryOptions - Number of options for loading + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryUnload, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx + */ +CUresult CUDAAPI cuLibraryLoadFromFile(CUlibrary *library, const char *fileName, + CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, + CUlibraryOption *libraryOptions, void **libraryOptionValues, unsigned int numLibraryOptions); + +/** + * \brief Unloads a library + * + * Unloads the library specified with \p library + * + * \param library - Library to unload + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuLibraryUnload(CUlibrary library); + +/** + * \brief Returns a kernel handle + * + * Returns in \p pKernel the handle of the kernel with name \p name located in library \p library. + * If kernel handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + * + * \param pKernel - Returned kernel handle + * \param library - Library to retrieve kernel from + * \param name - Name of kernel to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuKernelGetFunction, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction + */ +CUresult CUDAAPI cuLibraryGetKernel(CUkernel *pKernel, CUlibrary library, const char *name); + +/** + * \brief Returns a module handle + * + * Returns in \p pMod the module handle associated with the current context located in + * library \p library. If module handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + * + * \param pMod - Returned module handle + * \param library - Library to retrieve module from + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuModuleGetFunction + */ +CUresult CUDAAPI cuLibraryGetModule(CUmodule *pMod, CUlibrary library); + +/** + * \brief Returns a function handle + * + * Returns in \p pFunc the handle of the function for the requested kernel \p kernel and + * the current context. If function handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. 
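+ *
+ * As a minimal sketch, assuming \c lib was loaded with ::cuLibraryLoadData or
+ * ::cuLibraryLoadFromFile and exports a kernel with the hypothetical name
+ * \c vecAdd:
+ * \code
+   CUkernel kern;
+   CUfunction fn;
+   cuLibraryGetKernel(&kern, lib, "vecAdd");
+   cuKernelGetFunction(&fn, kern);   // handle is valid for the current context
+   // fn can now be passed to cuLaunchKernel()
+ * \endcode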
+ * + * \param pFunc - Returned function handle + * \param kernel - Kernel to retrieve function for the requested context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuLibraryGetKernel, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction + */ +CUresult CUDAAPI cuKernelGetFunction(CUfunction *pFunc, CUkernel kernel); + +/** + * \brief Returns a global device pointer + * + * Returns in \p *dptr and \p *bytes the base pointer and size of the global with + * name \p name for the requested library \p library and the current context. + * If no global for the requested name \p name exists, the call returns ::CUDA_ERROR_NOT_FOUND. + * One of the parameters \p dptr or \p bytes (not both) can be NULL in which + * case it is ignored. + * + * \param dptr - Returned global device pointer for the requested context + * \param bytes - Returned global size in bytes + * \param library - Library to retrieve global from + * \param name - Name of global to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuLibraryGetModule, + * cuModuleGetGlobal + */ +CUresult CUDAAPI cuLibraryGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); + +/** + * \brief Returns a pointer to managed memory + * + * Returns in \p *dptr and \p *bytes the base pointer and size of the managed memory with + * name \p name for the requested library \p library. If no managed memory with the + * requested name \p name exists, the call returns ::CUDA_ERROR_NOT_FOUND. One of the parameters + * \p dptr or \p bytes (not both) can be NULL in which case it is ignored. + * Note that managed memory for library \p library is shared across devices and is registered + * when the library is loaded into atleast one context. + * + * \note The API requires a CUDA context to be present and initialized on at least one device. + * If no context is present, the call returns ::CUDA_ERROR_NOT_FOUND. + * + * \param dptr - Returned pointer to the managed memory + * \param bytes - Returned memory size in bytes + * \param library - Library to retrieve managed memory from + * \param name - Name of managed memory to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + */ +CUresult CUDAAPI cuLibraryGetManaged(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); + +/** + * \brief Returns a pointer to a unified function + * + * Returns in \p *fptr the function pointer to a unified function denoted by \p symbol. + * If no unified function with name \p symbol exists, the call returns ::CUDA_ERROR_NOT_FOUND. + * If there is no device with attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS present in the system, + * the call may return ::CUDA_ERROR_NOT_FOUND. 
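+ *
+ * As a minimal sketch, assuming \c lib exports a unified function under the
+ * hypothetical symbol \c deviceCallback and \c dev is a valid device handle:
+ * \code
+   int supported = 0;
+   cuDeviceGetAttribute(&supported,
+                        CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS, dev);
+   if (supported) {
+       void *fptr = NULL;
+       cuLibraryGetUnifiedFunction(&fptr, lib, "deviceCallback");
+   }
+ * \endcode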
+ * + * \param fptr - Returned pointer to a unified function + * \param library - Library to retrieve function pointer memory from + * \param symbol - Name of function pointer to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + */ +CUresult CUDAAPI cuLibraryGetUnifiedFunction(void **fptr, CUlibrary library, const char *symbol); + +/** + * \brief Returns information about a kernel + * + * Returns in \p *pi the integer value of the attribute \p attrib for the kernel + * \p kernel for the requested device \p dev. The supported attributes are: + * - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads + * per block, beyond which a launch of the kernel would fail. This number + * depends on both the kernel and the requested device. + * - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of + * statically-allocated shared memory per block required by this kernel. + * This does not include dynamically-allocated shared memory requested by + * the user at runtime. + * - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated + * constant memory required by this kernel. + * - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory + * used by each thread of this kernel. + * - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread + * of this kernel. + * - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for + * which the kernel was compiled. This value is the major PTX version * 10 + * + the minor PTX version, so a PTX version 1.3 function would return the + * value 13. Note that this may return the undefined value of 0 for cubins + * compiled prior to CUDA 3.0. + * - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for + * which the kernel was compiled. This value is the major binary + * version * 10 + the minor binary version, so a binary version 1.3 function + * would return the value 13. Note that this will return a value of 10 for + * legacy cubins that do not have a properly-encoded binary architecture + * version. + * - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the kernel has + * been compiled with user specified option "-Xptxas --dlcm=ca" set. + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of + * dynamically-allocated shared memory. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 + * cache split ratio in percent of total shared memory. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the + * kernel must launch with a valid cluster size specified. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether + * the function can be launched with non-portable cluster size. 1 is allowed, + * 0 is disallowed. A non-portable cluster size may only function on the + * specific SKUs the program is tested on. The launch might fail if the + * program is run on a different hardware platform. 
CUDA API provides + * cudaOccupancyMaxActiveClusters to assist with checking whether the desired + * size can be launched on the current device. A portable cluster size is + * guaranteed to be functional on all compute capabilities higher than the + * target compute capability. The portable cluster size for sm_90 is 8 blocks + * per cluster. This value may increase for future compute capabilities. The + * specific hardware unit may support higher cluster sizes that’s not + * guaranteed to be portable. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \note If another thread is trying to set the same attribute on the same device using + * ::cuKernelSetAttribute() simultaneously, the attribute query will give the old or new + * value depending on the interleavings chosen by the OS scheduler and memory consistency. + * + * \param pi - Returned attribute value + * \param attrib - Attribute requested + * \param kernel - Kernel to query attribute of + * \param dev - Device to query attribute of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuKernelSetAttribute, + * ::cuLibraryGetKernel, + * ::cuLaunchKernel, + * ::cuKernelGetFunction, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction, + * ::cuFuncGetAttribute + */ +CUresult CUDAAPI cuKernelGetAttribute(int *pi, CUfunction_attribute attrib, CUkernel kernel, CUdevice dev); + +/** + * \brief Sets information about a kernel + * + * This call sets the value of a specified attribute \p attrib on the kernel \p kernel + * for the requested device \p dev to an integer value specified by \p val. + * This function returns CUDA_SUCCESS if the new value of the attribute could be + * successfully set. If the set fails, this call will return an error. + * Not all attributes can have values set. Attempting to set a value on a read-only + * attribute will result in an error (CUDA_ERROR_INVALID_VALUE) + * + * Note that attributes set using ::cuFuncSetAttribute() will override the attribute + * set by this API irrespective of whether the call to ::cuFuncSetAttribute() is made + * before or after this API call. However, ::cuKernelGetAttribute() will always + * return the attribute value set by this API. + * + * Supported attributes are: + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This is the maximum size in bytes of + * dynamically-allocated shared memory. The value should contain the requested + * maximum size of dynamically-allocated shared memory. The sum of this value and + * the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the + * device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. + * The maximal size of requestable dynamic shared memory may differ by GPU + * architecture. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 + * cache and shared memory use the same hardware resources, this sets the shared memory + * carveout preference, in percent of the total shared memory. + * See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + * This is only a hint, and the driver can choose a different ratio if required to execute the function. 
+ * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \note The API has stricter locking requirements in comparison to its legacy counterpart + * ::cuFuncSetAttribute() due to device-wide semantics. If multiple threads are trying to + * set the same attribute on the same device simultaneously, the attribute setting will depend + * on the interleavings chosen by the OS scheduler and memory consistency. + * + * \param attrib - Attribute requested + * \param val - Value to set + * \param kernel - Kernel to set attribute of + * \param dev - Device to set attribute of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuKernelGetAttribute, + * ::cuLibraryGetKernel, + * ::cuLaunchKernel, + * ::cuKernelGetFunction, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction, + * ::cuFuncSetAttribute + */ +CUresult CUDAAPI cuKernelSetAttribute(CUfunction_attribute attrib, int val, CUkernel kernel, CUdevice dev); + +/** + * \brief Sets the preferred cache configuration for a device kernel. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p config the preferred cache configuration for + * the device kernel \p kernel on the requested device \p dev. This is only a preference. + * The driver will use the requested configuration if possible, but it is free to choose a different + * configuration if required to execute \p kernel. Any context-wide preference + * set via ::cuCtxSetCacheConfig() will be overridden by this per-kernel + * setting. + * + * Note that attributes set using ::cuFuncSetCacheConfig() will override the attribute + * set by this API irrespective of whether the call to ::cuFuncSetCacheConfig() is made + * before or after this API call. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
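+ *
+ * As a minimal sketch of the per-kernel configuration calls described above,
+ * assuming \c kern was obtained via ::cuLibraryGetKernel and \c dev is a
+ * valid device handle (the supported configuration values are listed below):
+ * \code
+   // Opt into a larger dynamic shared memory limit, subject to the device's
+   // opt-in maximum, then prefer shared memory over L1 for this kernel.
+   cuKernelSetAttribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
+                        64 * 1024, kern, dev);
+   cuKernelSetCacheConfig(kern, CU_FUNC_CACHE_PREFER_SHARED, dev);
+ * \endcode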
+ *
+ *
+ * The supported cache configurations are:
+ * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)
+ * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache
+ * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory
+ * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory
+ *
+ * \note The API has stricter locking requirements in comparison to its legacy counterpart
+ * ::cuFuncSetCacheConfig() due to device-wide semantics. If multiple threads are trying to
+ * set a config on the same device simultaneously, the cache config setting will depend
+ * on the interleavings chosen by the OS scheduler and memory consistency.
+ *
+ * \param kernel - Kernel to configure cache for
+ * \param config - Requested cache configuration
+ * \param dev - Device to set attribute of
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY
+ *
+ * \sa ::cuLibraryLoadData,
+ * ::cuLibraryLoadFromFile,
+ * ::cuLibraryUnload,
+ * ::cuLibraryGetKernel,
+ * ::cuKernelGetFunction,
+ * ::cuLibraryGetModule,
+ * ::cuModuleGetFunction,
+ * ::cuFuncSetCacheConfig,
+ * ::cuCtxSetCacheConfig,
+ * ::cuLaunchKernel
+ */
+CUresult CUDAAPI cuKernelSetCacheConfig(CUkernel kernel, CUfunc_cache config, CUdevice dev);
+
+/** @} */ /* END CUDA_LIBRARY */
+
+/**
+ * \defgroup CUDA_MEM Memory Management
+ *
+ * ___MANBRIEF___ memory management functions of the low-level CUDA driver API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the memory management functions of the low-level CUDA
+ * driver application programming interface.
+ *
+ * @{
+ */
+
+/**
+ * \brief Gets free and total memory
+ *
+ * Returns in \p *total the total amount of memory available to the current context.
+ * Returns in \p *free the amount of memory on the device that is free according to the OS.
+ * CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free.
+ * In a multi-tenant situation, the free estimate returned is subject to a race condition:
+ * an allocation or free performed by a different process, or by a different thread in the
+ * same process, between the time free memory is estimated and the time it is reported will
+ * cause the reported free value to deviate from the actual free memory.
+ *
+ * The integrated GPU on Tegra shares memory with the CPU and other components
+ * of the SoC. The free and total values returned by the API exclude
+ * the SWAP memory space maintained by the OS on some platforms.
+ * The OS may move some of the memory pages into the swap area as the GPU or
+ * CPU allocates or accesses memory. See the Tegra app note on how to calculate
+ * total and free memory on Tegra.
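+ *
+ * As a minimal sketch, assuming a context is current:
+ * \code
+   size_t freeBytes = 0, totalBytes = 0;
+   if (cuMemGetInfo(&freeBytes, &totalBytes) == CUDA_SUCCESS) {
+       // freeBytes is a point-in-time estimate and may change immediately.
+       printf("%zu of %zu bytes free\n", freeBytes, totalBytes);
+   }
+ * \endcode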
+ * + * \param free - Returned free memory in bytes + * \param total - Returned total memory in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemGetInfo + */ +CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total); + +/** + * \brief Allocates device memory + * + * Allocates \p bytesize bytes of linear memory on the device and returns in + * \p *dptr a pointer to the allocated memory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p bytesize + * is 0, ::cuMemAlloc() returns ::CUDA_ERROR_INVALID_VALUE. + * + * \param dptr - Returned device pointer + * \param bytesize - Requested allocation size in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMalloc + */ +CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize); + +/** + * \brief Allocates pitched device memory + * + * Allocates at least \p WidthInBytes * \p Height bytes of linear memory on + * the device and returns in \p *dptr a pointer to the allocated memory. The + * function may pad the allocation to ensure that corresponding pointers in + * any given row will continue to meet the alignment requirements for + * coalescing as the address is updated from row to row. \p ElementSizeBytes + * specifies the size of the largest reads and writes that will be performed + * on the memory range. \p ElementSizeBytes may be 4, 8 or 16 (since coalesced + * memory transactions are not possible on other data sizes). If + * \p ElementSizeBytes is smaller than the actual read/write size of a kernel, + * the kernel will run correctly, but possibly at reduced speed. The pitch + * returned in \p *pPitch by ::cuMemAllocPitch() is the width in bytes of the + * allocation. 
The intended usage of pitch is as a separate parameter of the
+ * allocation, used to compute addresses within the 2D array. Given the row
+ * and column of an array element of type \b T, the address is computed as:
+ * \code
+   T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
+ * \endcode
+ *
+ * The pitch returned by ::cuMemAllocPitch() is guaranteed to work with
+ * ::cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is
+ * recommended that programmers consider performing pitch allocations using
+ * ::cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is
+ * especially true if the application will be performing 2D memory copies
+ * between different regions of device memory (whether linear memory or CUDA
+ * arrays).
+ *
+ * The byte alignment of the pitch returned by ::cuMemAllocPitch() is guaranteed
+ * to match or exceed the alignment requirement for texture binding with
+ * ::cuTexRefSetAddress2D().
+ *
+ * \param dptr - Returned device pointer
+ * \param pPitch - Returned pitch of allocation in bytes
+ * \param WidthInBytes - Requested allocation width in bytes
+ * \param Height - Requested allocation height in rows
+ * \param ElementSizeBytes - Size of largest reads/writes for range
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY
+ * \notefnerr
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
+ * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaMallocPitch
+ */
+CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes);
+
+/**
+ * \brief Frees device memory
+ *
+ * Frees the memory space pointed to by \p dptr, which must have been returned
+ * by a previous call to one of the following memory allocation APIs - ::cuMemAlloc(),
+ * ::cuMemAllocPitch(), ::cuMemAllocManaged(), ::cuMemAllocAsync(), ::cuMemAllocFromPoolAsync().
+ *
+ * Note - This API will not perform any implicit synchronization when the pointer was allocated with
+ * ::cuMemAllocAsync or ::cuMemAllocFromPoolAsync. Callers must ensure that all accesses to the
+ * pointer have completed before invoking ::cuMemFree. For best performance and memory reuse, users
+ * should use ::cuMemFreeAsync to free memory allocated via the stream ordered memory allocator.
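+ *
+ * As a minimal sketch, assuming a context is current:
+ * \code
+   CUdeviceptr dBuf = 0;
+   if (cuMemAlloc(&dBuf, 1 << 20) == CUDA_SUCCESS) {   // 1 MiB of device memory
+       // ... use dBuf in kernels or memcpy calls ...
+       cuMemFree(dBuf);   // all accesses must have completed by this point
+   }
+ * \endcode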
+ * + * \param dptr - Pointer to memory to free + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemAllocManaged, ::cuMemAllocAsync, ::cuMemAllocFromPoolAsync, + * ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, ::cuMemcpy3D, ::cuMemcpy3DAsync, + * ::cuMemcpyAtoA, ::cuMemcpyAtoD, ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + * ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, + * ::cuMemcpyHtoAAsync, ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, ::cuMemFreeAsync, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaFree + */ +CUresult CUDAAPI cuMemFree(CUdeviceptr dptr); + +/** + * \brief Get information on memory allocations + * + * Returns the base address in \p *pbase and size in \p *psize of the + * allocation by ::cuMemAlloc() or ::cuMemAllocPitch() that contains the input + * pointer \p dptr. Both parameters \p pbase and \p psize are optional. If one + * of them is NULL, it is ignored. + * + * \param pbase - Returned base address + * \param psize - Returned size of device memory allocation + * \param dptr - Device pointer to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32 + */ +CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr); + +/** + * \brief Allocates page-locked host memory + * + * Allocates \p bytesize bytes of host memory that is page-locked and + * accessible to the device. The driver tracks the virtual memory ranges + * allocated with this function and automatically accelerates calls to + * functions such as ::cuMemcpy(). Since the memory can be accessed directly by + * the device, it can be read or written with much higher bandwidth than + * pageable memory obtained with functions such as ::malloc(). Allocating + * excessive amounts of memory with ::cuMemAllocHost() may degrade system + * performance, since it reduces the amount of memory available to the system + * for paging. As a result, this function is best used sparingly to allocate + * staging areas for data exchange between host and device. 
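+ *
+ * As a minimal sketch of a pinned staging buffer, assuming a context is
+ * current and \c dDst is an existing device allocation of at least the same
+ * size:
+ * \code
+   float *hStage = NULL;
+   if (cuMemAllocHost((void **)&hStage, 256 * sizeof(float)) == CUDA_SUCCESS) {
+       // ... fill hStage on the host ...
+       cuMemcpyHtoD(dDst, hStage, 256 * sizeof(float));   // copy from pinned memory
+       cuMemFreeHost(hStage);
+   }
+ * \endcode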
+ * + * Note all host memory allocated using ::cuMemHostAlloc() will automatically + * be immediately accessible to all contexts on all devices which support unified + * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). + * The device pointer that may be used to access this host memory from those + * contexts is always equal to the returned host pointer \p *pp. + * See \ref CUDA_UNIFIED for additional details. + * + * \param pp - Returned host pointer to page-locked memory + * \param bytesize - Requested allocation size in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMallocHost + */ +CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize); + +/** + * \brief Frees page-locked host memory + * + * Frees the memory space pointed to by \p p, which must have been returned by + * a previous call to ::cuMemAllocHost(). + * + * \param p - Pointer to memory to free + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaFreeHost + */ +CUresult CUDAAPI cuMemFreeHost(void *p); + +/** + * \brief Allocates page-locked host memory + * + * Allocates \p bytesize bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cuMemcpyHtoD(). Since the memory can be accessed directly by the device, + * it can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between + * host and device. 
+ * + * The \p Flags parameter enables different options to be specified that + * affect the allocation, as follows. + * + * - ::CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * + * - ::CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address + * space. The device pointer to the memory may be obtained by calling + * ::cuMemHostGetDevicePointer(). + * + * - ::CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined + * (WC). WC memory can be transferred across the PCI Express bus more + * quickly on some system configurations, but cannot be read efficiently by + * most CPUs. WC memory is a good option for buffers that will be written by + * the CPU and read by the GPU via mapped pinned memory or host->device + * transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * The ::CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for + * devices that do not support mapped pinned memory. The failure is deferred + * to ::cuMemHostGetDevicePointer() because the memory may be mapped into + * other CUDA contexts via the ::CU_MEMHOSTALLOC_PORTABLE flag. + * + * The memory allocated by this function must be freed with ::cuMemFreeHost(). + * + * Note all host memory allocated using ::cuMemHostAlloc() will automatically + * be immediately accessible to all contexts on all devices which support unified + * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). + * Unless the flag ::CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer + * that may be used to access this host memory from those contexts is always equal + * to the returned host pointer \p *pp. If the flag ::CU_MEMHOSTALLOC_WRITECOMBINED + * is specified, then the function ::cuMemHostGetDevicePointer() must be used + * to query the device pointer, even if the context supports unified addressing. + * See \ref CUDA_UNIFIED for additional details. 
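+ *
+ * As a minimal sketch of mapped pinned memory, assuming a context is current
+ * on a device that supports mapped pinned memory:
+ * \code
+   void *hBuf = NULL;
+   CUdeviceptr dView = 0;
+   if (cuMemHostAlloc(&hBuf, 4096, CU_MEMHOSTALLOC_DEVICEMAP) == CUDA_SUCCESS) {
+       cuMemHostGetDevicePointer(&dView, hBuf, 0);   // device-visible alias of hBuf
+       // ... pass dView to a kernel ...
+       cuMemFreeHost(hBuf);
+   }
+ * \endcode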
+ * + * \param pp - Returned host pointer to page-locked memory + * \param bytesize - Requested allocation size in bytes + * \param Flags - Flags for allocation request + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaHostAlloc + */ +CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags); + +/** + * \brief Passes back device pointer of mapped pinned memory + * + * Passes back the device pointer \p pdptr corresponding to the mapped, pinned + * host buffer \p p allocated by ::cuMemHostAlloc. + * + * ::cuMemHostGetDevicePointer() will fail if the ::CU_MEMHOSTALLOC_DEVICEMAP + * flag was not specified at the time the memory was allocated, or if the + * function is called on a GPU that does not support mapped pinned memory. + * + * For devices that have a non-zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory + * can also be accessed from the device using the host pointer \p p. + * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not + * match the original host pointer \p p and depends on the devices visible to the + * application. If all devices visible to the application have a non-zero value for the + * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer() + * will match the original pointer \p p. If any device visible to the application + * has a zero value for the device attribute, the device pointer returned by + * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p, + * but it will be suitable for use on all devices provided Unified Virtual Addressing + * is enabled. In such systems, it is valid to access the memory using either pointer + * on devices that have a non-zero value for the device attribute. Note however that + * such devices should access the memory using only one of the two pointers and not both. + * + * \p Flags provides for future releases. For now, it must be set to 0. 
+ * + * \param pdptr - Returned device pointer + * \param p - Host pointer + * \param Flags - Options (must be 0) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaHostGetDevicePointer + */ +CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags); + +/** + * \brief Passes back flags that were used for a pinned allocation + * + * Passes back the flags \p pFlags that were specified when allocating + * the pinned host buffer \p p allocated by ::cuMemHostAlloc. + * + * ::cuMemHostGetFlags() will fail if the pointer does not reside in + * an allocation performed by ::cuMemAllocHost() or ::cuMemHostAlloc(). + * + * \param pFlags - Returned flags word + * \param p - Host pointer + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuMemAllocHost, + * ::cuMemHostAlloc, + * ::cudaHostGetFlags + */ +CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p); + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p bytesize bytes of managed memory on the device and returns in + * \p *dptr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::CUDA_ERROR_NOT_SUPPORTED is returned. Support + * for managed memory can be queried using the device attribute + * ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p bytesize + * is 0, ::cuMemAllocManaged returns ::CUDA_ERROR_INVALID_VALUE. The pointer + * is valid on the CPU and on all GPUs in the system that support managed memory. + * All accesses to this pointer must obey the Unified Memory programming model. + * + * \p flags specifies the default stream association for this allocation. + * \p flags must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST. If + * ::CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from + * any stream on any device. If ::CU_MEM_ATTACH_HOST is specified, then the + * allocation should not be accessed from devices that have a zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to + * ::cuStreamAttachMemAsync will be required to enable access on such devices. + * + * If the association is later changed via ::cuStreamAttachMemAsync to + * a single stream, the default association as specified during ::cuMemAllocManaged + * is restored when that stream is destroyed. 
For __managed__ variables, the + * default association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a + * stream is an asynchronous operation, and as a result, the change to default + * association won't happen until all work in the stream has completed. + * + * Memory allocated with ::cuMemAllocManaged should be released with ::cuMemFree. + * + * Device memory oversubscription is possible for GPUs that have a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on + * such GPUs may be evicted from device memory to host memory at any time by the Unified + * Memory driver in order to make room for other allocations. + * + * In a multi-GPU system where all GPUs have a non-zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this + * API returns and instead may be populated on access. In such systems, managed memory can + * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to + * maintain data locality and prevent excessive page faults to the extent possible. The application + * can also guide the driver about memory usage patterns via ::cuMemAdvise. The application + * can also explicitly migrate memory to a desired processor's memory via + * ::cuMemPrefetchAsync. + * + * In a multi-GPU system where all of the GPUs have a zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support + * with each other, the physical storage for managed memory is created on the GPU which is active + * at the time ::cuMemAllocManaged is called. All other GPUs will reference the data at reduced + * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate + * memory among such GPUs. + * + * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and + * where the value of the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + * is zero for at least one of those GPUs, the location chosen for physical storage of managed + * memory is system-dependent. + * - On Linux, the location chosen will be device memory as long as the current set of active + * contexts are on devices that either have peer-to-peer support with each other or have a + * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. 
+ * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a + * non-zero value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all contexts created in + * that process on devices that support managed memory have to be peer-to-peer compatible + * with each other. Context creation will fail if a context is created on a device that + * supports managed memory and is not peer-to-peer compatible with any of the other + * managed memory supporting devices on which contexts were previously created, even if + * those contexts have been destroyed. These environment variables are described + * in the CUDA programming guide under the "CUDA environment variables" section. + * - On ARM, managed memory is not available on discrete gpu with Drive PX-2. + * + * \param dptr - Returned device pointer + * \param bytesize - Requested allocation size in bytes + * \param flags - Must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cuDeviceGetAttribute, ::cuStreamAttachMemAsync, + * ::cudaMallocManaged + */ +CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags); + +/** + * \brief Returns a handle to a compute device + * + * Returns in \p *device a device handle given a PCI bus ID string. + * + * \param dev - Returned device handle + * + * \param pciBusId - String in one of the following forms: + * [domain]:[bus]:[device].[function] + * [domain]:[bus]:[device] + * [bus]:[device].[function] + * where \p domain, \p bus, \p device, and \p function are all hexadecimal values + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGet, + * ::cuDeviceGetAttribute, + * ::cuDeviceGetPCIBusId, + * ::cudaDeviceGetByPCIBusId + */ +CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId); + +/** + * \brief Returns a PCI Bus Id string for the device + * + * Returns an ASCII string identifying the device \p dev in the NULL-terminated + * string pointed to by \p pciBusId. \p len specifies the maximum length of the + * string that may be returned. + * + * \param pciBusId - Returned identifier string for the device in the following format + * [domain]:[bus]:[device].[function] + * where \p domain, \p bus, \p device, and \p function are all hexadecimal values. + * pciBusId should be large enough to store 13 characters including the NULL-terminator. 
+ * + * \param len - Maximum length of string to store in \p name + * + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGet, + * ::cuDeviceGetAttribute, + * ::cuDeviceGetByPCIBusId, + * ::cudaDeviceGetPCIBusId + */ +CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev); + +/** + * \brief Gets an interprocess handle for a previously allocated event + * + * Takes as input a previously allocated event. This event must have been + * created with the ::CU_EVENT_INTERPROCESS and ::CU_EVENT_DISABLE_TIMING + * flags set. This opaque handle may be copied into other processes and + * opened with ::cuIpcOpenEventHandle to allow efficient hardware + * synchronization between GPU work in different processes. + * + * After the event has been opened in the importing process, + * ::cuEventRecord, ::cuEventSynchronize, ::cuStreamWaitEvent and + * ::cuEventQuery may be used in either process. Performing operations + * on the imported event after the exported event has been freed + * with ::cuEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param pHandle - Pointer to a user allocated CUipcEventHandle + * in which to return the opaque event handle + * \param event - Event allocated with ::CU_EVENT_INTERPROCESS and + * ::CU_EVENT_DISABLE_TIMING flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuEventCreate, + * ::cuEventDestroy, + * ::cuEventSynchronize, + * ::cuEventQuery, + * ::cuStreamWaitEvent, + * ::cuIpcOpenEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcOpenMemHandle, + * ::cuIpcCloseMemHandle, + * ::cudaIpcGetEventHandle + */ +CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event); + +/** + * \brief Opens an interprocess event handle for use in the current process + * + * Opens an interprocess event handle exported from another process with + * ::cuIpcGetEventHandle. This function returns a ::CUevent that behaves like + * a locally created event with the ::CU_EVENT_DISABLE_TIMING flag specified. + * This event must be freed with ::cuEventDestroy. + * + * Performing operations on the imported event after the exported event has + * been freed with ::cuEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. 
+ * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param phEvent - Returns the imported event + * \param handle - Interprocess handle to open + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuEventCreate, + * ::cuEventDestroy, + * ::cuEventSynchronize, + * ::cuEventQuery, + * ::cuStreamWaitEvent, + * ::cuIpcGetEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcOpenMemHandle, + * ::cuIpcCloseMemHandle, + * ::cudaIpcOpenEventHandle + */ +CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle); + +/** + * \brief Gets an interprocess memory handle for an existing device memory + * allocation + * + * Takes a pointer to the base of an existing device memory allocation created + * with ::cuMemAlloc and exports it for use in another process. This is a + * lightweight operation and may be called multiple times on an allocation + * without adverse effects. + * + * If a region of memory is freed with ::cuMemFree and a subsequent call + * to ::cuMemAlloc returns memory with the same device address, + * ::cuIpcGetMemHandle will return a unique handle for the + * new memory. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param pHandle - Pointer to user allocated ::CUipcMemHandle to return + * the handle in. + * \param dptr - Base pointer to previously allocated device memory + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuIpcGetEventHandle, + * ::cuIpcOpenEventHandle, + * ::cuIpcOpenMemHandle, + * ::cuIpcCloseMemHandle, + * ::cudaIpcGetMemHandle + */ +CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr); + +/** + * \brief Opens an interprocess memory handle exported from another process + * and returns a device pointer usable in the local process. + * + * Maps memory exported from another process with ::cuIpcGetMemHandle into + * the current device address space. For contexts on different devices + * ::cuIpcOpenMemHandle can attempt to enable peer access between the + * devices as if the user called ::cuCtxEnablePeerAccess. This behavior is + * controlled by the ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag. + * ::cuDeviceCanAccessPeer can determine if a mapping is possible. + * + * Contexts that may open ::CUipcMemHandles are restricted in the following way. + * ::CUipcMemHandles from each ::CUdevice in a given process may only be opened + * by one ::CUcontext per ::CUdevice per other process. + * + * If the memory handle has already been opened by the current context, the + * reference count on the handle is incremented by 1 and the existing device pointer + * is returned. + * + * Memory returned from ::cuIpcOpenMemHandle must be freed with + * ::cuIpcCloseMemHandle. 
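+ *
+ * \par
+ * As an illustrative sketch only (not part of the official documentation),
+ * the exporting and importing sides of a typical exchange might look like the
+ * following. The transport of the handle between processes is shown as
+ * hypothetical sendToPeer()/receiveFromPeer() helpers, and error checking is
+ * omitted:
+ * \code
+  // Exporting process: allocate device memory and publish a handle for it.
+  CUdeviceptr dptr;
+  CUipcMemHandle handle;
+  cuMemAlloc(&dptr, 1 << 20);
+  cuIpcGetMemHandle(&handle, dptr);
+  sendToPeer(&handle, sizeof(handle));      // hypothetical IPC transport
+
+  // Importing process: map the exported allocation and use it.
+  CUipcMemHandle h;
+  CUdeviceptr mapped;
+  receiveFromPeer(&h, sizeof(h));           // hypothetical IPC transport
+  cuIpcOpenMemHandle(&mapped, h, CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS);
+  // ... launch work that reads or writes 'mapped' ...
+  cuIpcCloseMemHandle(mapped);
+ * \endcode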
+ * + * Calling ::cuMemFree on an exported memory region before calling + * ::cuIpcCloseMemHandle in the importing context will result in undefined + * behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param pdptr - Returned device pointer + * \param handle - ::CUipcMemHandle to open + * \param Flags - Flags for this operation. Must be specified as ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_TOO_MANY_PEERS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \note No guarantees are made about the address returned in \p *pdptr. + * In particular, multiple processes may not receive the same address for the same \p handle. + * + * \sa + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuIpcGetEventHandle, + * ::cuIpcOpenEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcCloseMemHandle, + * ::cuCtxEnablePeerAccess, + * ::cuDeviceCanAccessPeer, + * ::cudaIpcOpenMemHandle + */ +CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); + +/** + * \brief Attempts to close memory mapped with ::cuIpcOpenMemHandle + * + * Decrements the reference count of the memory returned by ::cuIpcOpenMemHandle by 1. + * When the reference count reaches 0, this API unmaps the memory. The original allocation + * in the exporting process as well as imported mappings in other processes + * will be unaffected. + * + * Any resources used to enable peer access will be freed if this is the + * last mapping using them. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param dptr - Device pointer returned by ::cuIpcOpenMemHandle + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \sa + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuIpcGetEventHandle, + * ::cuIpcOpenEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle + */ +CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr); + +/** + * \brief Registers an existing host memory range for use by CUDA + * + * Page-locks the memory range specified by \p p and \p bytesize and maps it + * for the device(s) as specified by \p Flags. This memory range also is added + * to the same tracking mechanism as ::cuMemHostAlloc to automatically accelerate + * calls to functions such as ::cuMemcpyHtoD(). Since the memory can be accessed + * directly by the device, it can be read or written with much higher bandwidth + * than pageable memory that has not been registered. Page-locking excessive + * amounts of memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to register staging areas for data exchange between + * host and device. 
+ *
+ * On systems where ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES
+ * is true, ::cuMemHostRegister will not page-lock the memory range specified
+ * by \p p but only populate unpopulated pages.
+ *
+ * The \p Flags parameter enables different options to be specified that
+ * affect the allocation, as follows.
+ *
+ * - ::CU_MEMHOSTREGISTER_PORTABLE: The memory returned by this call will be
+ *   considered as pinned memory by all CUDA contexts, not just the one that
+ *   performed the allocation.
+ *
+ * - ::CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address
+ *   space. The device pointer to the memory may be obtained by calling
+ *   ::cuMemHostGetDevicePointer().
+ *
+ * - ::CU_MEMHOSTREGISTER_IOMEMORY: The pointer is treated as pointing to some
+ *   I/O memory space, e.g. the PCI Express resource of a 3rd party device.
+ *
+ * - ::CU_MEMHOSTREGISTER_READ_ONLY: The pointer is treated as pointing to memory
+ *   that is considered read-only by the device. On platforms without
+ *   ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is
+ *   required in order to register memory mapped to the CPU as read-only. Support
+ *   for the use of this flag can be queried from the device attribute
+ *   ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with
+ *   a current context associated with a device that does not have this attribute
+ *   set will cause ::cuMemHostRegister to error with ::CUDA_ERROR_NOT_SUPPORTED.
+ *
+ * All of these flags are orthogonal to one another: a developer may page-lock
+ * memory that is portable or mapped with no restrictions.
+ *
+ * The ::CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for
+ * devices that do not support mapped pinned memory. The failure is deferred
+ * to ::cuMemHostGetDevicePointer() because the memory may be mapped into
+ * other CUDA contexts via the ::CU_MEMHOSTREGISTER_PORTABLE flag.
+ *
+ * For devices that have a non-zero value for the device attribute
+ * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory
+ * can also be accessed from the device using the host pointer \p p.
+ * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not
+ * match the original host pointer \p p and depends on the devices visible to the
+ * application. If all devices visible to the application have a non-zero value for the
+ * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer()
+ * will match the original pointer \p p. If any device visible to the application
+ * has a zero value for the device attribute, the device pointer returned by
+ * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p,
+ * but it will be suitable for use on all devices provided Unified Virtual Addressing
+ * is enabled. In such systems, it is valid to access the memory using either pointer
+ * on devices that have a non-zero value for the device attribute. Note however that
+ * such devices should access the memory using only one of the two pointers and not both.
+ *
+ * The memory page-locked by this function must be unregistered with
+ * ::cuMemHostUnregister().
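+ *
+ * \par
+ * A minimal usage sketch (illustrative only, error checking omitted): an
+ * existing host buffer is registered, optionally mapped into the device
+ * address space with ::CU_MEMHOSTREGISTER_DEVICEMAP, and unregistered once
+ * the transfers that rely on it have completed:
+ * \code
+  size_t bytes = 1 << 20;
+  void *hostBuf = malloc(bytes);            // ordinary pageable allocation
+
+  cuMemHostRegister(hostBuf, bytes, CU_MEMHOSTREGISTER_DEVICEMAP);
+
+  CUdeviceptr devAlias;
+  cuMemHostGetDevicePointer(&devAlias, hostBuf, 0);
+
+  // ... use hostBuf as a pinned staging buffer, or access devAlias
+  //     from device code, until the buffer is no longer needed ...
+
+  cuMemHostUnregister(hostBuf);
+  free(hostBuf);
+ * \endcode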
+ * + * \param p - Host pointer to memory to page-lock + * \param bytesize - Size in bytes of the address range to page-lock + * \param Flags - Flags for allocation request + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa + * ::cuMemHostUnregister, + * ::cuMemHostGetFlags, + * ::cuMemHostGetDevicePointer, + * ::cudaHostRegister + */ +CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); + +/** + * \brief Unregisters a memory range that was registered with cuMemHostRegister. + * + * Unmaps the memory range whose base address is specified by \p p, and makes + * it pageable again. + * + * The base address must be the same one specified to ::cuMemHostRegister(). + * + * \param p - Host pointer to memory to unregister + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED, + * \notefnerr + * + * \sa + * ::cuMemHostRegister, + * ::cudaHostUnregister + */ +CUresult CUDAAPI cuMemHostUnregister(void *p); + +/** + * \brief Copies memory + * + * Copies data between two pointers. + * \p dst and \p src are base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. + * Note that this function infers the type of the transfer (host to host, host to + * device, device to device, or device to host) from the pointer values. This + * function is only allowed in contexts which support unified addressing. + * + * \param dst - Destination unified virtual address space pointer + * \param src - Source unified virtual address space pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol + */ +CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount); + +/** + * \brief Copies device memory between two contexts + * + * Copies from device memory in one context to device memory in another + * context. \p dstDevice is the base device pointer of the destination memory + * and \p dstContext is the destination context. \p srcDevice is the base + * device pointer of the source memory and \p srcContext is the source pointer. + * \p ByteCount specifies the number of bytes to copy. 
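+ *
+ * \par
+ * An illustrative sketch (not part of the official documentation) of copying
+ * between two devices. It assumes contexts \c ctxA and \c ctxB have already
+ * been created on those devices; error checking is omitted:
+ * \code
+  CUdeviceptr bufA, bufB;
+  size_t bytes = 1 << 20;
+
+  cuCtxSetCurrent(ctxA);
+  cuMemAlloc(&bufA, bytes);                 // allocation owned by ctxA
+
+  cuCtxSetCurrent(ctxB);
+  cuMemAlloc(&bufB, bytes);                 // allocation owned by ctxB
+
+  // Copy the contents of bufA (in ctxA) into bufB (in ctxB).
+  cuMemcpyPeer(bufB, ctxB, bufA, ctxA, bytes);
+ * \endcode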
+ * + * \param dstDevice - Destination device pointer + * \param dstContext - Destination context + * \param srcDevice - Source device pointer + * \param srcContext - Source context + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuMemcpyDtoD, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpyPeer + */ +CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount); + +/** + * \brief Copies memory from Host to Device + * + * Copies from host memory to device memory. \p dstDevice and \p srcHost are + * the base addresses of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. + * + * \param dstDevice - Destination device pointer + * \param srcHost - Source host pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyToSymbol + */ +CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); + +/** + * \brief Copies memory from Device to Host + * + * Copies from device to host memory. \p dstHost and \p srcDevice specify the + * base pointers of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. 
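+ *
+ * \par
+ * A minimal round-trip sketch (illustrative only, error checking omitted)
+ * combining ::cuMemcpyHtoD and ::cuMemcpyDtoH:
+ * \code
+  float hostIn[256], hostOut[256];
+  size_t bytes = sizeof(hostIn);
+  CUdeviceptr devBuf;
+
+  cuMemAlloc(&devBuf, bytes);
+  cuMemcpyHtoD(devBuf, hostIn, bytes);      // upload input data
+  // ... run kernels that read or update devBuf ...
+  cuMemcpyDtoH(hostOut, devBuf, bytes);     // download the results
+  cuMemFree(devBuf);
+ * \endcode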
+ * + * \param dstHost - Destination host pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyFromSymbol + */ +CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); + +/** + * \brief Copies memory from Device to Device + * + * Copies from device memory to device memory. \p dstDevice and \p srcDevice + * are the base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstDevice - Destination device pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol + */ +CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); + +/** + * \brief Copies memory from Device to Array + * + * Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset + * specify the CUDA array handle and starting index of the destination data. + * \p srcDevice specifies the base pointer of the source. \p ByteCount + * specifies the number of bytes to copy. 
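+ *
+ * \par
+ * Illustrative sketch only (error checking omitted): copying the contents of
+ * a device buffer into a 1D CUDA array of floats created with ::cuArrayCreate:
+ * \code
+  CUDA_ARRAY_DESCRIPTOR desc;
+  desc.Format      = CU_AD_FORMAT_FLOAT;
+  desc.NumChannels = 1;
+  desc.Width       = 1024;
+  desc.Height      = 0;                     // a height of 0 selects a 1D array
+
+  CUarray arr;
+  CUdeviceptr devBuf;
+  cuArrayCreate(&arr, &desc);
+  cuMemAlloc(&devBuf, 1024 * sizeof(float));
+  // ... fill devBuf ...
+  cuMemcpyDtoA(arr, 0, devBuf, 1024 * sizeof(float));
+ * \endcode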
+ * + * \param dstArray - Destination array + * \param dstOffset - Offset in bytes of destination array + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpyToArray + */ +CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount); + +/** + * \brief Copies memory from Array to Device + * + * Copies from one 1D CUDA array to device memory. \p dstDevice specifies the + * base pointer of the destination and must be naturally aligned with the CUDA + * array elements. \p srcArray and \p srcOffset specify the CUDA array handle + * and the offset in bytes into the array where the copy is to begin. + * \p ByteCount specifies the number of bytes to copy and must be evenly + * divisible by the array element size. + * + * \param dstDevice - Destination device pointer + * \param srcArray - Source array + * \param srcOffset - Offset in bytes of source array + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpyFromArray + */ +CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); + +/** + * \brief Copies memory from Host to Array + * + * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset + * specify the CUDA array handle and starting offset in bytes of the destination + * data. \p pSrc specifies the base address of the source. \p ByteCount specifies + * the number of bytes to copy. 
+ *
+ * \param dstArray  - Destination array
+ * \param dstOffset - Offset in bytes of destination array
+ * \param srcHost   - Source host pointer
+ * \param ByteCount - Size of memory copy in bytes
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ * \note_sync
+ * \note_memcpy
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
+ * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaMemcpyToArray
+ */
+CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
+
+/**
+ * \brief Copies memory from Array to Host
+ *
+ * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
+ * pointer of the destination. \p srcArray and \p srcOffset specify the CUDA
+ * array handle and starting offset in bytes of the source data.
+ * \p ByteCount specifies the number of bytes to copy.
+ *
+ * \param dstHost   - Destination host pointer
+ * \param srcArray  - Source array
+ * \param srcOffset - Offset in bytes of source array
+ * \param ByteCount - Size of memory copy in bytes
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ * \note_sync
+ * \note_memcpy
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
+ * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaMemcpyFromArray
+ */
+CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
+
+/**
+ * \brief Copies memory from Array to Array
+ *
+ * Copies from one 1D CUDA array to another. \p dstArray and \p srcArray
+ * specify the handles of the destination and source CUDA arrays for the copy,
+ * respectively. \p dstOffset and \p srcOffset specify the destination and
+ * source offsets in bytes into the CUDA arrays. \p ByteCount is the number of
+ * bytes to be copied. The elements of the two CUDA arrays need not have the
+ * same format, but they must have the same size, and \p ByteCount must be
+ * evenly divisible by that size.
+ * + * \param dstArray - Destination array + * \param dstOffset - Offset in bytes of destination array + * \param srcArray - Source array + * \param srcOffset - Offset in bytes of source array + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpyArrayToArray + */ +CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); + +/** + * \brief Copies memory for 2D arrays + * + * Perform a 2D memory copy according to the parameters specified in \p pCopy. + * The ::CUDA_MEMCPY2D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY2D_st { + unsigned int srcXInBytes, srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; + + unsigned int dstXInBytes, dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; + + unsigned int WidthInBytes; + unsigned int Height; + } CUDA_MEMCPY2D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + * specify the (host) base address of the source data and the bytes per row to + * apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + * specify the (device) base address of the source data and the bytes per row + * to apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + * ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. 
+ * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + * ignored. + * + * - ::srcXInBytes and ::srcY specify the base address of the source data for + * the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - ::dstXInBytes and ::dstY specify the base address of the destination data + * for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes and ::Height specify the width (in bytes) and height of + * the 2D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * + * \par + * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + * (device to device, CUDA array to device, CUDA array to CUDA array), + * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). + * ::cuMemcpy2DUnaligned() does not have this restriction, but may run + * significantly slower in the cases where ::cuMemcpy2D() would have returned + * an error code. 
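+ *
+ * \par
+ * Illustrative sketch only (error checking omitted): a host-to-device copy of
+ * a \c width x \c height region of floats into a pitched allocation obtained
+ * from ::cuMemAllocPitch:
+ * \code
+  size_t width = 512, height = 256, pitch;
+  float *hostImg = (float *)malloc(width * height * sizeof(float));
+  CUdeviceptr devImg;
+  cuMemAllocPitch(&devImg, &pitch, width * sizeof(float), height, sizeof(float));
+
+  CUDA_MEMCPY2D cp;
+  memset(&cp, 0, sizeof(cp));               // unused fields (offsets, arrays) stay 0
+  cp.srcMemoryType = CU_MEMORYTYPE_HOST;
+  cp.srcHost       = hostImg;
+  cp.srcPitch      = width * sizeof(float); // host rows are tightly packed
+  cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+  cp.dstDevice     = devImg;
+  cp.dstPitch      = pitch;                 // pitch reported by cuMemAllocPitch
+  cp.WidthInBytes  = width * sizeof(float);
+  cp.Height        = height;
+  cuMemcpy2D(&cp);
+ * \endcode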
+ * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, + * ::cudaMemcpy2DFromArray + */ +CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy); + +/** + * \brief Copies memory for 2D arrays + * + * Perform a 2D memory copy according to the parameters specified in \p pCopy. + * The ::CUDA_MEMCPY2D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY2D_st { + unsigned int srcXInBytes, srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; + unsigned int dstXInBytes, dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; + unsigned int WidthInBytes; + unsigned int Height; + } CUDA_MEMCPY2D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + * specify the (host) base address of the source data and the bytes per row to + * apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + * specify the (device) base address of the source data and the bytes per row + * to apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + * ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. 
+ * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + * ignored. + * + * - ::srcXInBytes and ::srcY specify the base address of the source data for + * the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - ::dstXInBytes and ::dstY specify the base address of the destination data + * for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes and ::Height specify the width (in bytes) and height of + * the 2D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * + * \par + * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + * (device to device, CUDA array to device, CUDA array to CUDA array), + * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). + * ::cuMemcpy2DUnaligned() does not have this restriction, but may run + * significantly slower in the cases where ::cuMemcpy2D() would have returned + * an error code. + * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, + * ::cudaMemcpy2DFromArray + */ +CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy); + +/** + * \brief Copies memory for 3D arrays + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. 
The ::CUDA_MEMCPY3D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY3D_st { + + unsigned int srcXInBytes, srcY, srcZ; + unsigned int srcLOD; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; // ignored when src is array + unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 + + unsigned int dstXInBytes, dstY, dstZ; + unsigned int dstLOD; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; // ignored when dst is array + unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 + + unsigned int WidthInBytes; + unsigned int Height; + unsigned int Depth; + } CUDA_MEMCPY3D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and + * ::srcHeight specify the (host) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and + * ::srcHeight specify the (device) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and + * ::srcHeight are ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data, the bytes per row, + * and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data, the bytes per + * row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and + * ::dstHeight are ignored. + * + * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source + * data for the copy. 
+ * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - dstXInBytes, ::dstY and ::dstZ specify the base address of the + * destination data for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height + * and depth of the 3D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * - If specified, ::srcHeight must be greater than or equal to ::Height + + * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + * + * \par + * ::cuMemcpy3D() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). + * + * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be + * set to 0. + * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy3D + */ +CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy); + +/** + * \brief Copies memory between contexts + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure + * for documentation of its parameters. + * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpy3DPeer + */ +CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy); + +/** + * \brief Copies memory asynchronously + * + * Copies data between two pointers. 
+ * \p dst and \p src are base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. + * Note that this function infers the type of the transfer (host to host, host to + * device, device to device, or device to host) from the pointer values. This + * function is only allowed in contexts which support unified addressing. + * + * \param dst - Destination unified virtual address space pointer + * \param src - Source unified virtual address space pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyToSymbolAsync, + * ::cudaMemcpyFromSymbolAsync + */ +CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies device memory between two contexts asynchronously. + * + * Copies from device memory in one context to device memory in another + * context. \p dstDevice is the base device pointer of the destination memory + * and \p dstContext is the destination context. \p srcDevice is the base + * device pointer of the source memory and \p srcContext is the source pointer. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstDevice - Destination device pointer + * \param dstContext - Destination context + * \param srcDevice - Source device pointer + * \param srcContext - Source context + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpyPeerAsync + */ +CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Host to Device + * + * Copies from host memory to device memory. \p dstDevice and \p srcHost are + * the base addresses of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. 
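+ *
+ * \par
+ * Illustrative sketch only (error checking omitted): an asynchronous upload
+ * issued on a user-created stream. The host buffer comes from
+ * ::cuMemAllocHost so that the copy can overlap with other work:
+ * \code
+  CUstream stream;
+  CUdeviceptr devBuf;
+  float *pinned;
+  size_t bytes = 1 << 20;
+
+  cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
+  cuMemAllocHost((void **)&pinned, bytes);  // page-locked host memory
+  cuMemAlloc(&devBuf, bytes);
+
+  // The copy is queued on 'stream' and returns immediately; 'pinned' must
+  // stay valid and unmodified until the stream has caught up.
+  cuMemcpyHtoDAsync(devBuf, pinned, bytes, stream);
+  // ... enqueue kernels on 'stream' that consume devBuf ...
+  cuStreamSynchronize(stream);
+ * \endcode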
+ * + * \param dstDevice - Destination device pointer + * \param srcHost - Source host pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyToSymbolAsync + */ +CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Device to Host + * + * Copies from device to host memory. \p dstHost and \p srcDevice specify the + * base pointers of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. + * + * \param dstHost - Destination host pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyFromSymbolAsync + */ +CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Device to Device + * + * Copies from device memory to device memory. \p dstDevice and \p srcDevice + * are the base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. 
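+ *
+ * \par
+ * Illustrative sketch only (error checking omitted): a device-to-device copy
+ * queued on a stream so that it is ordered after the kernels already
+ * submitted there:
+ * \code
+  CUstream stream;
+  CUdeviceptr src, dst;
+  size_t bytes = 1 << 20;
+
+  cuStreamCreate(&stream, CU_STREAM_DEFAULT);
+  cuMemAlloc(&src, bytes);
+  cuMemAlloc(&dst, bytes);
+
+  // ... kernels queued on 'stream' produce data in 'src' ...
+  cuMemcpyDtoDAsync(dst, src, bytes, stream);   // ordered after that work
+  // ... kernels queued on 'stream' may consume 'dst' without extra sync ...
+  cuStreamSynchronize(stream);
+ * \endcode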
+ * + * \param dstDevice - Destination device pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyToSymbolAsync, + * ::cudaMemcpyFromSymbolAsync + */ +CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Host to Array + * + * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset + * specify the CUDA array handle and starting offset in bytes of the + * destination data. \p srcHost specifies the base address of the source. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstArray - Destination array + * \param dstOffset - Offset in bytes of destination array + * \param srcHost - Source host pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyToArrayAsync + */ +CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Array to Host + * + * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base + * pointer of the destination. 
\p srcArray and \p srcOffset specify the CUDA + * array handle and starting offset in bytes of the source data. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstHost - Destination pointer + * \param srcArray - Source array + * \param srcOffset - Offset in bytes of source array + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyFromArrayAsync + */ +CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory for 2D arrays + * + * Perform a 2D memory copy according to the parameters specified in \p pCopy. + * The ::CUDA_MEMCPY2D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY2D_st { + unsigned int srcXInBytes, srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; + unsigned int dstXInBytes, dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; + unsigned int WidthInBytes; + unsigned int Height; + } CUDA_MEMCPY2D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + * specify the (host) base address of the source data and the bytes per row to + * apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + * specify the (device) base address of the source data and the bytes per row + * to apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. 
::srcHost, ::srcDevice and ::srcPitch are + * ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + * ignored. + * + * - ::srcXInBytes and ::srcY specify the base address of the source data for + * the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - ::dstXInBytes and ::dstY specify the base address of the destination data + * for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes and ::Height specify the width (in bytes) and height of + * the 2D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * - If specified, ::srcHeight must be greater than or equal to ::Height + + * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + * + * \par + * ::cuMemcpy2DAsync() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + * (device to device, CUDA array to device, CUDA array to CUDA array), + * ::cuMemcpy2DAsync() may fail for pitches not computed by ::cuMemAllocPitch(). 
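+ *
+ * As a minimal sketch (a current context and a valid \c hStream are assumed, standard
+ * includes and error checking are omitted, and \c hostImg / \c dImg are illustrative
+ * names), the following copies a dense host image into a pitched device allocation
+ * returned by ::cuMemAllocPitch(); for the copy to be fully asynchronous the host
+ * buffer would additionally need to be page-locked:
+ * \code
+     size_t width = 512, height = 256;                   // elements per row, number of rows
+     size_t widthInBytes = width * sizeof(float);
+     float *hostImg = (float *)malloc(widthInBytes * height);
+     CUdeviceptr dImg;
+     size_t dPitch;
+     cuMemAllocPitch(&dImg, &dPitch, widthInBytes, height, sizeof(float));
+
+     CUDA_MEMCPY2D cp;
+     memset(&cp, 0, sizeof(cp));
+     cp.srcMemoryType = CU_MEMORYTYPE_HOST;
+     cp.srcHost       = hostImg;
+     cp.srcPitch      = widthInBytes;                    // host rows are densely packed
+     cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+     cp.dstDevice     = dImg;
+     cp.dstPitch      = dPitch;                          // pitch chosen by cuMemAllocPitch
+     cp.WidthInBytes  = widthInBytes;
+     cp.Height        = height;
+     cuMemcpy2DAsync(&cp, hStream);
+ * \endcode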
+ * + * \param pCopy - Parameters for the memory copy + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync + */ +CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream); + +/** + * \brief Copies memory for 3D arrays + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. The ::CUDA_MEMCPY3D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY3D_st { + + unsigned int srcXInBytes, srcY, srcZ; + unsigned int srcLOD; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; // ignored when src is array + unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 + + unsigned int dstXInBytes, dstY, dstZ; + unsigned int dstLOD; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; // ignored when dst is array + unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 + + unsigned int WidthInBytes; + unsigned int Height; + unsigned int Depth; + } CUDA_MEMCPY3D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and + * ::srcHeight specify the (host) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and + * ::srcHeight specify the (device) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. 
+ * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and + * ::srcHeight are ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data, the bytes per row, + * and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data, the bytes per + * row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and + * ::dstHeight are ignored. + * + * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source + * data for the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - dstXInBytes, ::dstY and ::dstZ specify the base address of the + * destination data for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height + * and depth of the 3D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * - If specified, ::srcHeight must be greater than or equal to ::Height + + * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + * + * \par + * ::cuMemcpy3DAsync() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). + * + * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be + * set to 0. 
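+ *
+ * A minimal sketch, assuming densely packed linear memory on both sides, a current
+ * context, a valid \c hStream, and no error checking (all names are illustrative):
+ * \code
+     size_t w = 64, h = 64, d = 16;                      // extents in elements
+     size_t rowBytes = w;                                // one byte per element here
+     unsigned char *hostVol = (unsigned char *)malloc(rowBytes * h * d);
+     CUdeviceptr dVol;
+     cuMemAlloc(&dVol, rowBytes * h * d);
+
+     CUDA_MEMCPY3D cp;
+     memset(&cp, 0, sizeof(cp));                         // also zeroes srcLOD/dstLOD as required
+     cp.srcMemoryType = CU_MEMORYTYPE_HOST;
+     cp.srcHost       = hostVol;
+     cp.srcPitch      = rowBytes;
+     cp.srcHeight     = h;
+     cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+     cp.dstDevice     = dVol;
+     cp.dstPitch      = rowBytes;
+     cp.dstHeight     = h;
+     cp.WidthInBytes  = rowBytes;
+     cp.Height        = h;
+     cp.Depth         = d;
+     cuMemcpy3DAsync(&cp, hStream);
+ * \endcode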
+ * + * \param pCopy - Parameters for the memory copy + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpy3DAsync + */ +CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream); + +/** + * \brief Copies memory between contexts asynchronously. + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure + * for documentation of its parameters. + * + * \param pCopy - Parameters for the memory copy + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpy3DPeerAsync + */ +CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream); + +/** + * \brief Initializes device memory + * + * Sets the memory range of \p N 8-bit values to the specified value + * \p uc. 
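+ *
+ * For example, a freshly allocated buffer might be zero-filled as in this sketch
+ * (current context assumed, returned ::CUresult values ignored for brevity):
+ * \code
+     CUdeviceptr dBuf;
+     size_t bytes = 256 * 1024;
+     cuMemAlloc(&dBuf, bytes);
+     cuMemsetD8(dBuf, 0, bytes);                         // N counts 8-bit elements, i.e. bytes here
+ * \endcode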
+ * + * \param dstDevice - Destination device pointer + * \param uc - Value to set + * \param N - Number of elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset + */ +CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N); + +/** + * \brief Initializes device memory + * + * Sets the memory range of \p N 16-bit values to the specified value + * \p us. The \p dstDevice pointer must be two byte aligned. + * + * \param dstDevice - Destination device pointer + * \param us - Value to set + * \param N - Number of elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset + */ +CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N); + +/** + * \brief Initializes device memory + * + * Sets the memory range of \p N 32-bit values to the specified value + * \p ui. The \p dstDevice pointer must be four byte aligned. 
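+ *
+ * Because the fill value is an arbitrary 32-bit pattern, the call can also be used to
+ * initialize float data by passing the bit pattern of the desired value, as in this
+ * sketch (current context assumed, error checking omitted):
+ * \code
+     CUdeviceptr dFloats;
+     size_t n = 1024;                                    // number of 32-bit elements
+     cuMemAlloc(&dFloats, n * sizeof(float));
+     unsigned int pattern = 0x3f800000u;                 // IEEE-754 bit pattern of 1.0f
+     cuMemsetD32(dFloats, pattern, n);                   // every element now reads as 1.0f
+ * \endcode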
+ * + * \param dstDevice - Destination device pointer + * \param ui - Value to set + * \param N - Number of elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32Async, + * ::cudaMemset + */ +CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N); + +/** + * \brief Initializes device memory + * + * Sets the 2D memory range of \p Width 8-bit values to the specified value + * \p uc. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param uc - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2D + */ +CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); + +/** + * \brief Initializes device memory + * + * Sets the 2D memory range of \p Width 16-bit values to the specified value + * \p us. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be two byte aligned. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). 
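+ *
+ * A short sketch that pairs this call with ::cuMemAllocPitch() (current context
+ * assumed, names illustrative, error checking omitted):
+ * \code
+     CUdeviceptr dImg;
+     size_t pitch;
+     size_t width = 640, height = 480;                   // 16-bit elements per row, rows
+     cuMemAllocPitch(&dImg, &pitch, width * sizeof(unsigned short), height, 4);
+     cuMemsetD2D16(dImg, pitch, 0xFFFF, width, height);  // Width in elements, pitch in bytes
+ * \endcode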
+ * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param us - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2D + */ +CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); + +/** + * \brief Initializes device memory + * + * Sets the 2D memory range of \p Width 32-bit values to the specified value + * \p ui. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be four byte aligned. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param ui - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2D + */ +CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); + +/** + * \brief Sets device memory + * + * Sets the memory range of \p N 8-bit values to the specified value + * \p uc. 
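+ *
+ * Sketch: queueing the clear on the same stream as the work that consumes the buffer
+ * orders the two without a host-side synchronization (current context and a valid
+ * \c hStream assumed; names are illustrative):
+ * \code
+     CUdeviceptr dAccum;
+     size_t bytes = 1 << 20;
+     cuMemAlloc(&dAccum, bytes);
+     cuMemsetD8Async(dAccum, 0, bytes, hStream);         // clear is queued on hStream
+     // kernels or copies submitted to hStream after this point see a zeroed buffer
+ * \endcode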
+ * + * \param dstDevice - Destination device pointer + * \param uc - Value to set + * \param N - Number of elements + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemsetAsync + */ +CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the memory range of \p N 16-bit values to the specified value + * \p us. The \p dstDevice pointer must be two byte aligned. + * + * \param dstDevice - Destination device pointer + * \param us - Value to set + * \param N - Number of elements + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemsetAsync + */ +CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the memory range of \p N 32-bit values to the specified value + * \p ui. The \p dstDevice pointer must be four byte aligned. 
+ * + * \param dstDevice - Destination device pointer + * \param ui - Value to set + * \param N - Number of elements + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32, + * ::cudaMemsetAsync + */ +CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the 2D memory range of \p Width 8-bit values to the specified value + * \p uc. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param uc - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2DAsync + */ +CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the 2D memory range of \p Width 16-bit values to the specified value + * \p us. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be two byte aligned. 
This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param us - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2DAsync + */ +CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the 2D memory range of \p Width 32-bit values to the specified value + * \p ui. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be four byte aligned. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). 
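+ *
+ * For instance, a pitched image of packed 32-bit pixels could be cleared asynchronously
+ * as below, where \c dImg, \c pitch, \c width and \c height describe an allocation
+ * obtained from ::cuMemAllocPitch() and \c hStream is a valid stream (illustrative
+ * sketch, error checking omitted):
+ * \code
+     unsigned int fillValue = 0xFF000000u;               // example fill pattern for 32-bit pixels
+     cuMemsetD2D32Async(dImg, pitch, fillValue, width, height, hStream);
+ * \endcode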
+ * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param ui - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2DAsync + */ +CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); + +/** + * \brief Creates a 1D or 2D CUDA array + * + * Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure + * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. + * The ::CUDA_ARRAY_DESCRIPTOR is defined as: + * + * \code + typedef struct { + unsigned int Width; + unsigned int Height; + CUarray_format Format; + unsigned int NumChannels; + } CUDA_ARRAY_DESCRIPTOR; + * \endcode + * where: + * + * - \p Width, and \p Height are the width, and height of the CUDA array (in + * elements); the CUDA array is one-dimensional if height is 0, two-dimensional + * otherwise; + * - ::Format specifies the format of the elements; ::CUarray_format is + * defined as: + * \code + typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, + CU_AD_FORMAT_SIGNED_INT8 = 0x08, + CU_AD_FORMAT_SIGNED_INT16 = 0x09, + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, + CU_AD_FORMAT_HALF = 0x10, + CU_AD_FORMAT_FLOAT = 0x20 + } CUarray_format; + * \endcode + * - \p NumChannels specifies the number of packed components per CUDA array + * element; it may be 1, 2, or 4; + * + * Here are examples of CUDA array descriptions: + * + * Description for a CUDA array of 2048 floats: + * \code + CUDA_ARRAY_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 2048; + desc.Height = 1; + * \endcode + * + * Description for a 64 x 64 CUDA array of floats: + * \code + CUDA_ARRAY_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 64; + desc.Height = 64; + * \endcode + * + * Description for a \p width x \p height CUDA array of 64-bit, 4x16-bit + * float16's: + * \code + CUDA_ARRAY_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_HALF; + desc.NumChannels = 4; + desc.Width = width; + desc.Height = height; + * \endcode + * + * Description for a \p width x \p height CUDA array of 16-bit elements, each + * of which is two 8-bit unsigned chars: + * \code + 
CUDA_ARRAY_DESCRIPTOR arrayDesc; + desc.Format = CU_AD_FORMAT_UNSIGNED_INT8; + desc.NumChannels = 2; + desc.Width = width; + desc.Height = height; + * \endcode + * + * \param pHandle - Returned array + * \param pAllocateArray - Array descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMallocArray + */ +CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray); + +/** + * \brief Get a 1D or 2D CUDA array descriptor + * + * Returns in \p *pArrayDescriptor a descriptor containing information on the + * format and dimensions of the CUDA array \p hArray. It is useful for + * subroutines that have been passed a CUDA array, but need to know the CUDA + * array parameters for validation or other purposes. + * + * \param pArrayDescriptor - Returned array descriptor + * \param hArray - Array to get descriptor of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaArrayGetInfo + */ +CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor, CUarray hArray); + +/** + * \brief Returns the layout properties of a sparse CUDA array + * + * Returns the layout properties of a sparse CUDA array in \p sparseProperties + * If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_SPARSE + * ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * If the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, + * then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize represents the total size of the array. Otherwise, it will be zero. + * Also, the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is always zero. + * Note that the \p array must have been allocated using ::cuArrayCreate or ::cuArray3DCreate. 
For CUDA arrays obtained + * using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. Instead, ::cuMipmappedArrayGetSparseProperties + * must be used to obtain the sparse properties of the entire CUDA mipmapped array to which \p array belongs to. + * + * \return + * ::CUDA_SUCCESS + * ::CUDA_ERROR_INVALID_VALUE + * + * \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES + * \param[in] array - CUDA array to get the sparse properties of + * \sa ::cuMipmappedArrayGetSparseProperties, ::cuMemMapArrayAsync + */ +CUresult CUDAAPI cuArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties, CUarray array); + +/** + * \brief Returns the layout properties of a sparse CUDA mipmapped array + * + * Returns the sparse array layout properties in \p sparseProperties + * If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_SPARSE + * ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * For non-layered CUDA mipmapped arrays, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize returns the + * size of the mip tail region. The mip tail region includes all mip levels whose width, height or depth + * is less than that of the tile. + * For layered CUDA mipmapped arrays, if ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, + * then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies the size of the mip tail of all layers combined. + * Otherwise, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies mip tail size per layer. + * The returned value of ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is valid only if ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize is non-zero. + * + * \return + * ::CUDA_SUCCESS + * ::CUDA_ERROR_INVALID_VALUE + * + * \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES + * \param[in] mipmap - CUDA mipmapped array to get the sparse properties of + * \sa ::cuArrayGetSparseProperties, ::cuMemMapArrayAsync + */ +CUresult CUDAAPI cuMipmappedArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties, CUmipmappedArray mipmap); + +/** + * \brief Returns the memory requirements of a CUDA array + * + * Returns the memory requirements of a CUDA array in \p memoryRequirements + * If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_DEFERRED_MAPPING + * ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::size + * represents the total size of the CUDA array. + * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::alignment + * represents the alignment necessary for mapping the CUDA array. + * + * \return + * ::CUDA_SUCCESS + * ::CUDA_ERROR_INVALID_VALUE + * + * \param[out] memoryRequirements - Pointer to ::CUDA_ARRAY_MEMORY_REQUIREMENTS + * \param[in] array - CUDA array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cuMipmappedArrayGetMemoryRequirements, ::cuMemMapArrayAsync + */ +CUresult CUDAAPI cuArrayGetMemoryRequirements(CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements, CUarray array, CUdevice device); + +/** + * \brief Returns the memory requirements of a CUDA mipmapped array + * + * Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements + * If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_DEFERRED_MAPPING + * ::CUDA_ERROR_INVALID_VALUE will be returned. 
+ * + * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::size + * represents the total size of the CUDA mipmapped array. + * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::alignment + * represents the alignment necessary for mapping the CUDA mipmapped + * array. + * + * \return + * ::CUDA_SUCCESS + * ::CUDA_ERROR_INVALID_VALUE + * + * \param[out] memoryRequirements - Pointer to ::CUDA_ARRAY_MEMORY_REQUIREMENTS + * \param[in] mipmap - CUDA mipmapped array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cuArrayGetMemoryRequirements, ::cuMemMapArrayAsync + */ +CUresult CUDAAPI cuMipmappedArrayGetMemoryRequirements(CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements, CUmipmappedArray mipmap, CUdevice device); + +/** + * \brief Gets a CUDA array plane from a CUDA array + * + * Returns in \p pPlaneArray a CUDA array that represents a single format plane + * of the CUDA array \p hArray. + * + * If \p planeIdx is greater than the maximum number of planes in this array or if the array does + * not have a multi-planar format e.g: ::CU_AD_FORMAT_NV12, then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * Note that if the \p hArray has format ::CU_AD_FORMAT_NV12, then passing in 0 for \p planeIdx returns + * a CUDA array of the same size as \p hArray but with one channel and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. + * If 1 is passed for \p planeIdx, then the returned CUDA array has half the height and width + * of \p hArray with two channels and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. + * + * \param pPlaneArray - Returned CUDA array referenced by the \p planeIdx + * \param hArray - Multiplanar CUDA array + * \param planeIdx - Plane index + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::cuArrayCreate, + * ::cudaArrayGetPlane + */ +CUresult CUDAAPI cuArrayGetPlane(CUarray *pPlaneArray, CUarray hArray, unsigned int planeIdx); + +/** + * \brief Destroys a CUDA array + * + * Destroys the CUDA array \p hArray. + * + * \param hArray - Array to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ARRAY_IS_MAPPED, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaFreeArray + */ +CUresult CUDAAPI cuArrayDestroy(CUarray hArray); + +/** + * \brief Creates a 3D CUDA array + * + * Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure + * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. 
+ * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: + * + * \code + typedef struct { + unsigned int Width; + unsigned int Height; + unsigned int Depth; + CUarray_format Format; + unsigned int NumChannels; + unsigned int Flags; + } CUDA_ARRAY3D_DESCRIPTOR; + * \endcode + * where: + * + * - \p Width, \p Height, and \p Depth are the width, height, and depth of the + * CUDA array (in elements); the following types of CUDA arrays can be allocated: + * - A 1D array is allocated if \p Height and \p Depth extents are both zero. + * - A 2D array is allocated if only \p Depth extent is zero. + * - A 3D array is allocated if all three extents are non-zero. + * - A 1D layered CUDA array is allocated if only \p Height is zero and the + * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number + * of layers is determined by the depth extent. + * - A 2D layered CUDA array is allocated if all three extents are non-zero and + * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number + * of layers is determined by the depth extent. + * - A cubemap CUDA array is allocated if all three extents are non-zero and the + * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and + * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, + * where the six layers represent the six faces of a cube. The order of the six + * layers in memory is the same as that listed in ::CUarray_cubemap_face. + * - A cubemap layered CUDA array is allocated if all three extents are non-zero, + * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. + * \p Width must be equal to \p Height, and \p Depth must be a multiple of six. + * A cubemap layered CUDA array is a special type of 2D layered CUDA array that + * consists of a collection of cubemaps. The first six layers represent the first + * cubemap, the next six layers form the second cubemap, and so on. + * + * - ::Format specifies the format of the elements; ::CUarray_format is + * defined as: + * \code + typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, + CU_AD_FORMAT_SIGNED_INT8 = 0x08, + CU_AD_FORMAT_SIGNED_INT16 = 0x09, + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, + CU_AD_FORMAT_HALF = 0x10, + CU_AD_FORMAT_FLOAT = 0x20 + } CUarray_format; + * \endcode + * + * - \p NumChannels specifies the number of packed components per CUDA array + * element; it may be 1, 2, or 4; + * + * - ::Flags may be set to + * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays. If this flag is set, + * \p Depth specifies the number of layers, not the depth of a 3D array. + * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to the CUDA array. + * If this flag is not set, ::cuSurfRefSetArray will fail when attempting to bind the CUDA array + * to a surface reference. + * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this flag is set, \p Width must be + * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set, + * then \p Depth must be a multiple of six. + * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array will be used for texture gather. + * Texture gather can only be performed on 2D CUDA arrays. + * + * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table. + * All values are specified in elements. 
Note that for brevity's sake, the full name of the device attribute
+ * is not specified. For ex., TEXTURE1D_WIDTH refers to the device attribute
+ * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH.
+ *
+ * Note that 2D CUDA arrays have different size requirements if the ::CUDA_ARRAY3D_TEXTURE_GATHER flag
+ * is set. \p Width and \p Height must not be greater than ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
+ * and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT respectively, in that case.
+ *
+ * <table>
+ * <tr><td><b>CUDA array type</b></td>
+ * <td><b>Valid extents that must always be met<br>{(width range in elements), (height range),
+ * (depth range)}</b></td>
+ * <td><b>Valid extents with CUDA_ARRAY3D_SURFACE_LDST set<br>
+ * {(width range in elements), (height range), (depth range)}</b></td></tr>
+ * <tr><td>1D</td>
+ * <td>{ (1,TEXTURE1D_WIDTH), 0, 0 }</td>
+ * <td>{ (1,SURFACE1D_WIDTH), 0, 0 }</td></tr>
+ * <tr><td>2D</td>
+ * <td>{ (1,TEXTURE2D_WIDTH), (1,TEXTURE2D_HEIGHT), 0 }</td>
+ * <td>{ (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }</td></tr>
+ * <tr><td>3D</td>
+ * <td>{ (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
+ * <br>OR<br>{ (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE),
+ * (1,TEXTURE3D_DEPTH_ALTERNATE) }</td>
+ * <td>{ (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
+ * (1,SURFACE3D_DEPTH) }</td></tr>
+ * <tr><td>1D Layered</td>
+ * <td>{ (1,TEXTURE1D_LAYERED_WIDTH), 0,
+ * (1,TEXTURE1D_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACE1D_LAYERED_WIDTH), 0,
+ * (1,SURFACE1D_LAYERED_LAYERS) }</td></tr>
+ * <tr><td>2D Layered</td>
+ * <td>{ (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT),
+ * (1,TEXTURE2D_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT),
+ * (1,SURFACE2D_LAYERED_LAYERS) }</td></tr>
+ * <tr><td>Cubemap</td>
+ * <td>{ (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }</td>
+ * <td>{ (1,SURFACECUBEMAP_WIDTH),
+ * (1,SURFACECUBEMAP_WIDTH), 6 }</td></tr>
+ * <tr><td>Cubemap Layered</td>
+ * <td>{ (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH),
+ * (1,TEXTURECUBEMAP_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH),
+ * (1,SURFACECUBEMAP_LAYERED_LAYERS) }</td></tr>
+ * </table>
+ * + * Here are examples of CUDA array descriptions: + * + * Description for a CUDA array of 2048 floats: + * \code + CUDA_ARRAY3D_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 2048; + desc.Height = 0; + desc.Depth = 0; + * \endcode + * + * Description for a 64 x 64 CUDA array of floats: + * \code + CUDA_ARRAY3D_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 64; + desc.Height = 64; + desc.Depth = 0; + * \endcode + * + * Description for a \p width x \p height x \p depth CUDA array of 64-bit, + * 4x16-bit float16's: + * \code + CUDA_ARRAY3D_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_HALF; + desc.NumChannels = 4; + desc.Width = width; + desc.Height = height; + desc.Depth = depth; + * \endcode + * + * \param pHandle - Returned array + * \param pAllocateArray - 3D array descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMalloc3DArray + */ +CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray); + +/** + * \brief Get a 3D CUDA array descriptor + * + * Returns in \p *pArrayDescriptor a descriptor containing information on the + * format and dimensions of the CUDA array \p hArray. It is useful for + * subroutines that have been passed a CUDA array, but need to know the CUDA + * array parameters for validation or other purposes. + * + * This function may be called on 1D and 2D arrays, in which case the \p Height + * and/or \p Depth members of the descriptor struct will be set to 0. 
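+ *
+ * An illustrative sketch that creates a 2D layered array with ::cuArray3DCreate() and
+ * then reads its dimensions back (current context assumed, error checking omitted):
+ * \code
+     CUDA_ARRAY3D_DESCRIPTOR desc;
+     memset(&desc, 0, sizeof(desc));
+     desc.Format      = CU_AD_FORMAT_FLOAT;
+     desc.NumChannels = 1;
+     desc.Width       = 256;
+     desc.Height      = 256;
+     desc.Depth       = 8;                               // number of layers, because of the flag below
+     desc.Flags       = CUDA_ARRAY3D_LAYERED;
+
+     CUarray hArr;
+     cuArray3DCreate(&hArr, &desc);
+
+     CUDA_ARRAY3D_DESCRIPTOR query;
+     cuArray3DGetDescriptor(&query, hArr);               // query.Width/Height/Depth now describe hArr
+     cuArrayDestroy(hArr);
+ * \endcode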
+ * + * \param pArrayDescriptor - Returned 3D array descriptor + * \param hArray - 3D array to get descriptor of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaArrayGetInfo + */ +CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor, CUarray hArray); + +/** + * \brief Creates a CUDA mipmapped array + * + * Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure + * \p pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in \p *pHandle. + * \p numMipmapLevels specifies the number of mipmap levels to be allocated. This value is + * clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]. + * + * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: + * + * \code + typedef struct { + unsigned int Width; + unsigned int Height; + unsigned int Depth; + CUarray_format Format; + unsigned int NumChannels; + unsigned int Flags; + } CUDA_ARRAY3D_DESCRIPTOR; + * \endcode + * where: + * + * - \p Width, \p Height, and \p Depth are the width, height, and depth of the + * CUDA array (in elements); the following types of CUDA arrays can be allocated: + * - A 1D mipmapped array is allocated if \p Height and \p Depth extents are both zero. + * - A 2D mipmapped array is allocated if only \p Depth extent is zero. + * - A 3D mipmapped array is allocated if all three extents are non-zero. + * - A 1D layered CUDA mipmapped array is allocated if only \p Height is zero and the + * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number + * of layers is determined by the depth extent. + * - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and + * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number + * of layers is determined by the depth extent. + * - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the + * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and + * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, + * where the six layers represent the six faces of a cube. The order of the six + * layers in memory is the same as that listed in ::CUarray_cubemap_face. + * - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, + * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. + * \p Width must be equal to \p Height, and \p Depth must be a multiple of six. + * A cubemap layered CUDA array is a special type of 2D layered CUDA array that + * consists of a collection of cubemaps. 
The first six layers represent the first + * cubemap, the next six layers form the second cubemap, and so on. + * + * - ::Format specifies the format of the elements; ::CUarray_format is + * defined as: + * \code + typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, + CU_AD_FORMAT_SIGNED_INT8 = 0x08, + CU_AD_FORMAT_SIGNED_INT16 = 0x09, + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, + CU_AD_FORMAT_HALF = 0x10, + CU_AD_FORMAT_FLOAT = 0x20 + } CUarray_format; + * \endcode + * + * - \p NumChannels specifies the number of packed components per CUDA array + * element; it may be 1, 2, or 4; + * + * - ::Flags may be set to + * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set, + * \p Depth specifies the number of layers, not the depth of a 3D array. + * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of + * the CUDA mipmapped array. If this flag is not set, ::cuSurfRefSetArray will fail when attempting to + * bind a mipmap level of the CUDA mipmapped array to a surface reference. + * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, \p Width must be + * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set, + * then \p Depth must be a multiple of six. + * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather. + * Texture gather can only be performed on 2D CUDA mipmapped arrays. + * + * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table. + * All values are specified in elements. Note that for brevity's sake, the full name of the device attribute + * is not specified. For ex., TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
 * CUDA array type | Valid extents that must always be met
 *                   {(width range in elements), (height range), (depth range)}
 *                 | Valid extents with CUDA_ARRAY3D_SURFACE_LDST set
 *                   {(width range in elements), (height range), (depth range)}
 *
 * 1D:
 *   - always:                         { (1,TEXTURE1D_MIPMAPPED_WIDTH), 0, 0 }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACE1D_WIDTH), 0, 0 }
 * 2D:
 *   - always:                         { (1,TEXTURE2D_MIPMAPPED_WIDTH), (1,TEXTURE2D_MIPMAPPED_HEIGHT), 0 }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }
 * 3D:
 *   - always:                         { (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
 *                                     OR
 *                                     { (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE), (1,TEXTURE3D_DEPTH_ALTERNATE) }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT), (1,SURFACE3D_DEPTH) }
 * 1D Layered:
 *   - always:                         { (1,TEXTURE1D_LAYERED_WIDTH), 0, (1,TEXTURE1D_LAYERED_LAYERS) }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACE1D_LAYERED_WIDTH), 0, (1,SURFACE1D_LAYERED_LAYERS) }
 * 2D Layered:
 *   - always:                         { (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT), (1,TEXTURE2D_LAYERED_LAYERS) }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT), (1,SURFACE2D_LAYERED_LAYERS) }
 * Cubemap:
 *   - always:                         { (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACECUBEMAP_WIDTH), (1,SURFACECUBEMAP_WIDTH), 6 }
 * Cubemap Layered:
 *   - always:                         { (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_LAYERS) }
 *   - with CUDA_ARRAY3D_SURFACE_LDST: { (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_LAYERS) }
+ * + * + * \param pHandle - Returned mipmapped array + * \param pMipmappedArrayDesc - mipmapped array descriptor + * \param numMipmapLevels - Number of mipmap levels + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cuMipmappedArrayDestroy, + * ::cuMipmappedArrayGetLevel, + * ::cuArrayCreate, + * ::cudaMallocMipmappedArray + */ +CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels); + +/** + * \brief Gets a mipmap level of a CUDA mipmapped array + * + * Returns in \p *pLevelArray a CUDA array that represents a single mipmap level + * of the CUDA mipmapped array \p hMipmappedArray. + * + * If \p level is greater than the maximum number of levels in this mipmapped array, + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * \param pLevelArray - Returned mipmap level CUDA array + * \param hMipmappedArray - CUDA mipmapped array + * \param level - Mipmap level + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::cuMipmappedArrayCreate, + * ::cuMipmappedArrayDestroy, + * ::cuArrayCreate, + * ::cudaGetMipmappedArrayLevel + */ +CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); + +/** + * \brief Destroys a CUDA mipmapped array + * + * Destroys the CUDA mipmapped array \p hMipmappedArray. + * + * \param hMipmappedArray - Mipmapped array to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ARRAY_IS_MAPPED, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * \notefnerr + * + * \sa + * ::cuMipmappedArrayCreate, + * ::cuMipmappedArrayGetLevel, + * ::cuArrayCreate, + * ::cudaFreeMipmappedArray + */ +CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray); + +/** +* \brief Retrieve handle for an address range +* +* Get a handle of the specified type to an address range. The address range +* must have been obtained by a prior call to either ::cuMemAlloc or ::cuMemAddressReserve. +* If the address range was obtained via ::cuMemAddressReserve, it must also be fully mapped via ::cuMemMap. +* +* Users must ensure the \p dptr and \p size are aligned to the host page size. +* +* When requesting CUmemRangeHandleType::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, +* users are expected to query for dma_buf support for the platform +* by using ::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED device attribute before calling +* this API. The \p handle will be interpreted as a pointer to an integer to store the dma_buf file descriptor. +* Users must ensure the entire address range is backed and mapped when +* the address range is allocated by ::cuMemAddressReserve. All the physical +* allocations backing the address range must be resident on the same device and +* have identical allocation properties. Users are also expected to retrieve a +* new handle every time the underlying physical allocation(s) corresponding +* to a previously queried VA range are changed. 
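/*
 * Illustrative sketch (not from the original header): allocate a 2D mipmapped
 * array of float4 texels with ::cuMipmappedArrayCreate, fetch the finest level
 * with ::cuMipmappedArrayGetLevel, and release everything with
 * ::cuMipmappedArrayDestroy. A current context is assumed; the requested level
 * count is clamped by the driver as documented above.
 *
 * \code
   CUresult make_mipmapped(unsigned int width, unsigned int height)
   {
       CUDA_ARRAY3D_DESCRIPTOR desc = {0};
       CUmipmappedArray mipmap;
       CUarray level0;
       unsigned int levels = 1;
       CUresult status;

       desc.Format      = CU_AD_FORMAT_FLOAT;
       desc.NumChannels = 4;
       desc.Width       = width;
       desc.Height      = height;
       desc.Depth       = 0;                  // 2D mipmapped array

       // Request a full mip chain: 1 + floor(log2(max(width, height))).
       while ((width | height) >> levels) levels++;

       status = cuMipmappedArrayCreate(&mipmap, &desc, levels);
       if (status != CUDA_SUCCESS) return status;

       status = cuMipmappedArrayGetLevel(&level0, mipmap, 0);   // finest level
       if (status != CUDA_SUCCESS) { cuMipmappedArrayDestroy(mipmap); return status; }

       // ... copy into / sample from level0; the level is owned by the mipmap ...

       return cuMipmappedArrayDestroy(mipmap);
   }
 * \endcode
 */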
+* +* \param[out] handle - Pointer to the location where the returned handle will be stored. +* \param[in] dptr - Pointer to a valid CUDA device allocation. Must be aligned to host page size. +* \param[in] size - Length of the address range. Must be aligned to host page size. +* \param[in] handleType - Type of handle requested (defines type and size of the \p handle output parameter) +* \param[in] flags - Reserved, must be zero +* +* \return +* CUDA_SUCCESS +* CUDA_ERROR_INVALID_VALUE +* CUDA_ERROR_NOT_SUPPORTED +*/ +CUresult CUDAAPI cuMemGetHandleForAddressRange(void *handle, CUdeviceptr dptr, size_t size, CUmemRangeHandleType handleType, unsigned long long flags); + +/** @} */ /* END CUDA_MEM */ + +/** + * \defgroup CUDA_VA Virtual Memory Management + * + * ___MANBRIEF___ virtual memory management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the virtual memory management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** +* \brief Allocate an address range reservation. +* +* Reserves a virtual address range based on the given parameters, giving +* the starting address of the range in \p ptr. This API requires a system that +* supports UVA. The size and address parameters must be a multiple of the +* host page size and the alignment must be a power of two or zero for default +* alignment. +* +* \param[out] ptr - Resulting pointer to start of virtual address range allocated +* \param[in] size - Size of the reserved virtual address range requested +* \param[in] alignment - Alignment of the reserved virtual address range requested +* \param[in] addr - Fixed starting address range requested +* \param[in] flags - Currently unused, must be zero +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_OUT_OF_MEMORY, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemAddressFree +*/ +CUresult CUDAAPI cuMemAddressReserve(CUdeviceptr *ptr, size_t size, size_t alignment, CUdeviceptr addr, unsigned long long flags); + +/** +* \brief Free an address range reservation. +* +* Frees a virtual address range reserved by cuMemAddressReserve. The size +* must match what was given to memAddressReserve and the ptr given must +* match what was returned from memAddressReserve. +* +* \param[in] ptr - Starting address of the virtual address range to free +* \param[in] size - Size of the virtual address region to free +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemAddressReserve +*/ +CUresult CUDAAPI cuMemAddressFree(CUdeviceptr ptr, size_t size); + +/** +* \brief Create a CUDA memory handle representing a memory allocation of a given size described by the given properties +* +* This creates a memory allocation on the target device specified through the +* \p prop structure. The created allocation will not have any device or host +* mappings. The generic memory \p handle for the allocation can be +* mapped to the address space of calling process via ::cuMemMap. This handle +* cannot be transmitted directly to other processes (see +* ::cuMemExportToShareableHandle). 
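/*
 * Illustrative sketch (not from the original header): export a device address
 * range as a dma_buf file descriptor, first checking
 * ::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED as required above. dptr and size are
 * assumed to be aligned to the host page size.
 *
 * \code
   CUresult get_dma_buf_fd(int *fd_out, CUdeviceptr dptr, size_t size, CUdevice dev)
   {
       int supported = 0;
       CUresult status;

       status = cuDeviceGetAttribute(&supported, CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED, dev);
       if (status != CUDA_SUCCESS) return status;
       if (!supported) return CUDA_ERROR_NOT_SUPPORTED;

       // For CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD the handle is written as an int.
       return cuMemGetHandleForAddressRange(fd_out, dptr, size,
                                            CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, 0);
   }
 * \endcode
 */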
On Windows, the caller must also pass +* an LPSECURITYATTRIBUTE in \p prop to be associated with this handle which +* limits or allows access to this handle for a recipient process (see +* ::CUmemAllocationProp::win32HandleMetaData for more). The \p size of this +* allocation must be a multiple of the the value given via +* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM +* flag. +* If ::CUmemAllocationProp::allocFlags::usage contains ::CU_MEM_CREATE_USAGE_TILE_POOL flag then +* the memory allocation is intended only to be used as backing tile pool for sparse CUDA arrays +* and sparse CUDA mipmapped arrays. +* (see ::cuMemMapArrayAsync). +* +* \param[out] handle - Value of handle returned. All operations on this allocation are to be performed using this handle. +* \param[in] size - Size of the allocation requested +* \param[in] prop - Properties of the allocation to create. +* \param[in] flags - flags for future use, must be zero now. +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_OUT_OF_MEMORY, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* +* \sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, const CUmemAllocationProp *prop, unsigned long long flags); + +/** +* \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate. +* +* Frees the memory that was allocated on a device through cuMemCreate. +* +* The memory allocation will be freed when all outstanding mappings to the memory +* are unmapped and when all outstanding references to the handle (including it's +* shareable counterparts) are also released. The generic memory handle can be +* freed when there are still outstanding mappings made with this handle. Each +* time a recipient process imports a shareable handle, it needs to pair it with +* ::cuMemRelease for the handle to be freed. If \p handle is not a valid handle +* the behavior is undefined. +* +* \param[in] handle Value of handle which was returned previously by cuMemCreate. +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* +* \sa ::cuMemCreate +*/ +CUresult CUDAAPI cuMemRelease(CUmemGenericAllocationHandle handle); + +/** +* \brief Maps an allocation handle to a reserved virtual address range. +* +* Maps bytes of memory represented by \p handle starting from byte \p offset to +* \p size to address range [\p addr, \p addr + \p size]. This range must be an +* address reservation previously reserved with ::cuMemAddressReserve, and +* \p offset + \p size must be less than the size of the memory allocation. +* Both \p ptr, \p size, and \p offset must be a multiple of the value given via +* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. +* If \p handle represents a multicast object, \p ptr, \p size and \p offset must +* be aligned to the value returned by ::cuMulticastGetGranularity with the flag +* ::CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is +* recommended that \p ptr, \p size and \p offset be aligned to the value +* returned by ::cuMulticastGetGranularity with the flag +* ::CU_MULTICAST_RECOMMENDED_GRANULARITY. 
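/*
 * Condensed sketch of the virtual memory management flow described above
 * (illustrative only): query the allocation granularity, create physical
 * memory, reserve a VA range, map it, and enable read/write access for the
 * owning device. The device ordinal is a parameter; cleanup of partially built
 * state on failure is omitted for brevity.
 *
 * \code
   CUresult vmm_alloc(CUdeviceptr *va_out, size_t *padded_out, size_t bytes, int dev_ordinal)
   {
       CUmemAllocationProp prop = {0};
       CUmemAccessDesc access = {0};
       CUmemGenericAllocationHandle handle;
       CUdeviceptr va;
       size_t gran, padded;
       CUresult status;

       prop.type          = CU_MEM_ALLOCATION_TYPE_PINNED;
       prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
       prop.location.id   = dev_ordinal;

       // Sizes passed to cuMemCreate / cuMemMap must be multiples of this value.
       status = cuMemGetAllocationGranularity(&gran, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM);
       if (status != CUDA_SUCCESS) return status;
       padded = ((bytes + gran - 1) / gran) * gran;

       status = cuMemCreate(&handle, padded, &prop, 0);
       if (status != CUDA_SUCCESS) return status;

       status = cuMemAddressReserve(&va, padded, 0, 0, 0);   // default alignment, any address
       if (status == CUDA_SUCCESS)
           status = cuMemMap(va, padded, 0, handle, 0);
       if (status == CUDA_SUCCESS) {
           // Mapping alone does not make the range accessible; grant access.
           access.location = prop.location;
           access.flags    = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
           status = cuMemSetAccess(va, padded, &access, 1);
       }
       if (status != CUDA_SUCCESS) { cuMemRelease(handle); return status; }

       // The handle can be released now; the mapping keeps the memory alive
       // until cuMemUnmap / cuMemAddressFree are called.
       cuMemRelease(handle);
       *va_out     = va;
       *padded_out = padded;
       return CUDA_SUCCESS;
   }
 * \endcode
 */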
+* +* Please note calling ::cuMemMap does not make the address accessible, +* the caller needs to update accessibility of a contiguous mapped VA +* range by calling ::cuMemSetAccess. +* +* Once a recipient process obtains a shareable memory handle +* from ::cuMemImportFromShareableHandle, the process must +* use ::cuMemMap to map the memory into its address ranges before +* setting accessibility with ::cuMemSetAccess. +* +* ::cuMemMap can only create mappings on VA range reservations +* that are not currently mapped. +* +* \param[in] ptr - Address where memory will be mapped. +* \param[in] size - Size of the memory mapping. +* \param[in] offset - Offset into the memory represented by +* - \p handle from which to start mapping +* - Note: currently must be zero. +* \param[in] handle - Handle to a shareable memory +* \param[in] flags - flags for future use, must be zero now. +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_OUT_OF_MEMORY, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* +* \sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemMap(CUdeviceptr ptr, size_t size, size_t offset, CUmemGenericAllocationHandle handle, unsigned long long flags); + +/** + * \brief Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays + * + * Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays. + * Each operation is specified by a ::CUarrayMapInfo entry in the \p mapInfoList array of size \p count. + * The structure ::CUarrayMapInfo is defined as follow: + \code + typedef struct CUarrayMapInfo_st { + CUresourcetype resourceType; + union { + CUmipmappedArray mipmap; + CUarray array; + } resource; + + CUarraySparseSubresourceType subresourceType; + union { + struct { + unsigned int level; + unsigned int layer; + unsigned int offsetX; + unsigned int offsetY; + unsigned int offsetZ; + unsigned int extentWidth; + unsigned int extentHeight; + unsigned int extentDepth; + } sparseLevel; + struct { + unsigned int layer; + unsigned long long offset; + unsigned long long size; + } miptail; + } subresource; + + CUmemOperationType memOperationType; + + CUmemHandleType memHandleType; + union { + CUmemGenericAllocationHandle memHandle; + } memHandle; + + unsigned long long offset; + unsigned int deviceBitMask; + unsigned int flags; + unsigned int reserved[2]; + } CUarrayMapInfo; + \endcode + * + * where ::CUarrayMapInfo::resourceType specifies the type of resource to be operated on. + * If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_ARRAY then + * ::CUarrayMapInfo::resource::array must be set to a valid sparse CUDA array handle. + * The CUDA array must be either a 2D, 2D layered or 3D CUDA array and must have been allocated using + * ::cuArrayCreate or ::cuArray3DCreate with the flag ::CUDA_ARRAY3D_SPARSE + * or ::CUDA_ARRAY3D_DEFERRED_MAPPING. + * For CUDA arrays obtained using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. + * If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + * then ::CUarrayMapInfo::resource::mipmap must be set to a valid sparse CUDA mipmapped array handle. 
+ * The CUDA mipmapped array must be either a 2D, 2D layered or 3D CUDA mipmapped array and must have been + * allocated using ::cuMipmappedArrayCreate with the flag ::CUDA_ARRAY3D_SPARSE + * or ::CUDA_ARRAY3D_DEFERRED_MAPPING. + * + * ::CUarrayMapInfo::subresourceType specifies the type of subresource within the resource. + * ::CUarraySparseSubresourceType_enum is defined as: + \code + typedef enum CUarraySparseSubresourceType_enum { + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 + } CUarraySparseSubresourceType; + \endcode + * + * where ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL indicates a + * sparse-miplevel which spans at least one tile in every dimension. The remaining miplevels which + * are too small to span at least one tile in any dimension constitute the mip tail region as indicated by + * ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL subresource type. + * + * If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL + * then ::CUarrayMapInfo::subresource::sparseLevel struct must contain valid array subregion offsets and extents. + * The ::CUarrayMapInfo::subresource::sparseLevel::offsetX, ::CUarrayMapInfo::subresource::sparseLevel::offsetY + * and ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must specify valid X, Y and Z offsets respectively. + * The ::CUarrayMapInfo::subresource::sparseLevel::extentWidth, ::CUarrayMapInfo::subresource::sparseLevel::extentHeight + * and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth must specify valid width, height and depth extents respectively. + * These offsets and extents must be aligned to the corresponding tile dimension. + * For CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::level must specify a valid mip level index. Otherwise, + * must be zero. + * For layered CUDA arrays and layered CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::layer must specify a valid layer index. Otherwise, + * must be zero. + * ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must be zero and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth + * must be set to 1 for 2D and 2D layered CUDA arrays and CUDA mipmapped arrays. + * Tile extents can be obtained by calling ::cuArrayGetSparseProperties and ::cuMipmappedArrayGetSparseProperties + * + * If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL + * then ::CUarrayMapInfo::subresource::miptail struct must contain valid mip tail offset in + * ::CUarrayMapInfo::subresource::miptail::offset and size in ::CUarrayMapInfo::subresource::miptail::size. + * Both, mip tail offset and mip tail size must be aligned to the tile size. + * For layered CUDA mipmapped arrays which don't have the flag ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL set in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags + * as returned by ::cuMipmappedArrayGetSparseProperties, ::CUarrayMapInfo::subresource::miptail::layer must specify a valid layer index. + * Otherwise, must be zero. + * + * If ::CUarrayMapInfo::resource::array or ::CUarrayMapInfo::resource::mipmap was created with ::CUDA_ARRAY3D_DEFERRED_MAPPING + * flag set the ::CUarrayMapInfo::subresourceType and the contents of ::CUarrayMapInfo::subresource will be ignored. + * + * ::CUarrayMapInfo::memOperationType specifies the type of operation. 
::CUmemOperationType is defined as: + \code + typedef enum CUmemOperationType_enum { + CU_MEM_OPERATION_TYPE_MAP = 1, + CU_MEM_OPERATION_TYPE_UNMAP = 2 + } CUmemOperationType; + \endcode + * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP then the subresource + * will be mapped onto the tile pool memory specified by ::CUarrayMapInfo::memHandle at offset ::CUarrayMapInfo::offset. + * The tile pool allocation has to be created by specifying the ::CU_MEM_CREATE_USAGE_TILE_POOL flag when calling ::cuMemCreate. Also, + * ::CUarrayMapInfo::memHandleType must be set to ::CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC. + * + * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_UNMAP then an unmapping operation + * is performed. ::CUarrayMapInfo::memHandle must be NULL. + * + * ::CUarrayMapInfo::deviceBitMask specifies the list of devices that must map or unmap physical memory. + * Currently, this mask must have exactly one bit set, and the corresponding device must match the device associated with the stream. + * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP, the device must also match + * the device associated with the tile pool memory allocation as specified by ::CUarrayMapInfo::memHandle. + * + * ::CUarrayMapInfo::flags and ::CUarrayMapInfo::reserved[] are unused and must be set to zero. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * + * \param[in] mapInfoList - List of ::CUarrayMapInfo + * \param[in] count - Count of ::CUarrayMapInfo in \p mapInfoList + * \param[in] hStream - Stream identifier for the stream to use for map or unmap operations + * + * \sa ::cuMipmappedArrayCreate, ::cuArrayCreate, ::cuArray3DCreate, ::cuMemCreate, ::cuArrayGetSparseProperties, ::cuMipmappedArrayGetSparseProperties + */ +CUresult CUDAAPI cuMemMapArrayAsync(CUarrayMapInfo *mapInfoList, unsigned int count, CUstream hStream); + +/** +* \brief Unmap the backing memory of a given address range. +* +* The range must be the entire contiguous address range that was mapped to. In +* other words, ::cuMemUnmap cannot unmap a sub-range of an address range mapped +* by ::cuMemCreate / ::cuMemMap. Any backing memory allocations will be freed +* if there are no existing mappings and there are no unreleased memory handles. +* +* When ::cuMemUnmap returns successfully the address range is converted to an +* address reservation and can be used for a future calls to ::cuMemMap. Any new +* mapping to this virtual address will need to have access granted through +* ::cuMemSetAccess, as all mappings start with no accessibility setup. +* +* \param[in] ptr - Starting address for the virtual address range to unmap +* \param[in] size - Size of the virtual address range to unmap +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* \note_sync +* +* \sa ::cuMemCreate, ::cuMemAddressReserve +*/ +CUresult CUDAAPI cuMemUnmap(CUdeviceptr ptr, size_t size); + +/** +* \brief Set the access flags for each location specified in \p desc for the given virtual address range +* +* Given the virtual address range via \p ptr and \p size, and the locations +* in the array given by \p desc and \p count, set the access flags for the +* target locations. 
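/*
 * Illustrative sketch of binding the first tile of a sparse CUDA array to tile
 * pool memory with ::cuMemMapArrayAsync. The array is assumed to have been
 * created with ::CUDA_ARRAY3D_SPARSE, the pool allocation with the
 * ::CU_MEM_CREATE_USAGE_TILE_POOL usage flag, and the stream to belong to
 * device 0; field names follow the ::CUarrayMapInfo definition above.
 *
 * \code
   CUresult map_first_tile(CUarray sparseArray, CUmemGenericAllocationHandle tilePool, CUstream stream)
   {
       CUDA_ARRAY_SPARSE_PROPERTIES props;
       CUarrayMapInfo info = {0};
       CUresult status;

       status = cuArrayGetSparseProperties(&props, sparseArray);
       if (status != CUDA_SUCCESS) return status;

       info.resourceType    = CU_RESOURCE_TYPE_ARRAY;
       info.resource.array  = sparseArray;
       info.subresourceType = CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL;
       info.subresource.sparseLevel.level        = 0;   // must be 0 for non-mipmapped arrays
       info.subresource.sparseLevel.offsetX      = 0;   // tile-aligned offsets
       info.subresource.sparseLevel.offsetY      = 0;
       info.subresource.sparseLevel.offsetZ      = 0;
       info.subresource.sparseLevel.extentWidth  = props.tileExtent.width;
       info.subresource.sparseLevel.extentHeight = props.tileExtent.height;
       info.subresource.sparseLevel.extentDepth  = 1;   // 2D array
       info.memOperationType    = CU_MEM_OPERATION_TYPE_MAP;
       info.memHandleType       = CU_MEM_HANDLE_TYPE_GENERIC;
       info.memHandle.memHandle = tilePool;
       info.offset              = 0;                    // offset into the tile pool
       info.deviceBitMask       = 1u << 0;              // device ordinal 0

       return cuMemMapArrayAsync(&info, 1, stream);
   }
 * \endcode
 */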
The range must be a fully mapped address range +* containing all allocations created by ::cuMemMap / ::cuMemCreate. +* When setting the access flags for a virtual address range mapping a multicast +* object, \p ptr and \p size must be aligned to the value returned by +* ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_MINIMUM_GRANULARITY. +* For best performance however, it is recommended that \p ptr and \p size be +* aligned to the value returned by ::cuMulticastGetGranularity with the flag +* ::CU_MULTICAST_RECOMMENDED_GRANULARITY. +* +* \param[in] ptr - Starting address for the virtual address range +* \param[in] size - Length of the virtual address range +* \param[in] desc - Array of ::CUmemAccessDesc that describe how to change the +* - mapping for each location specified +* \param[in] count - Number of ::CUmemAccessDesc in \p desc +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* \note_sync +* +* \sa ::cuMemSetAccess, ::cuMemCreate, :cuMemMap +*/ +CUresult CUDAAPI cuMemSetAccess(CUdeviceptr ptr, size_t size, const CUmemAccessDesc *desc, size_t count); + +/** +* \brief Get the access \p flags set for the given \p location and \p ptr +* +* \param[out] flags - Flags set for this location +* \param[in] location - Location in which to check the flags for +* \param[in] ptr - Address in which to check the access flags for +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemSetAccess +*/ +CUresult CUDAAPI cuMemGetAccess(unsigned long long *flags, const CUmemLocation *location, CUdeviceptr ptr); + +/** +* \brief Exports an allocation to a requested shareable handle type +* +* Given a CUDA memory handle, create a shareable memory +* allocation handle that can be used to share the memory with other +* processes. The recipient process can convert the shareable handle back into a +* CUDA memory handle using ::cuMemImportFromShareableHandle and map +* it with ::cuMemMap. The implementation of what this handle is and how it +* can be transferred is defined by the requested handle type in \p handleType +* +* Once all shareable handles are closed and the allocation is released, the allocated +* memory referenced will be released back to the OS and uses of the CUDA handle afterward +* will lead to undefined behavior. +* +* This API can also be used in conjunction with other APIs (e.g. Vulkan, OpenGL) +* that support importing memory from the shareable type +* +* \param[out] shareableHandle - Pointer to the location in which to store the requested handle type +* \param[in] handle - CUDA handle for the memory allocation +* \param[in] handleType - Type of shareable handle requested (defines type and size of the \p shareableHandle output parameter) +* \param[in] flags - Reserved, must be zero +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemExportToShareableHandle(void *shareableHandle, CUmemGenericAllocationHandle handle, CUmemAllocationHandleType handleType, unsigned long long flags); + +/** +* \brief Imports an allocation from a requested shareable handle type. 
+* +* If the current process cannot support the memory described by this shareable +* handle, this API will error as CUDA_ERROR_NOT_SUPPORTED. +* +* \note Importing shareable handles exported from some graphics APIs(VUlkan, OpenGL, etc) +* created on devices under an SLI group may not be supported, and thus this API will +* return CUDA_ERROR_NOT_SUPPORTED. +* There is no guarantee that the contents of \p handle will be the same CUDA memory handle +* for the same given OS shareable handle, or the same underlying allocation. +* +* \param[out] handle - CUDA Memory handle for the memory allocation. +* \param[in] osHandle - Shareable Handle representing the memory allocation that is to be imported. +* \param[in] shHandleType - handle type of the exported handle ::CUmemAllocationHandleType. +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease +*/ +CUresult CUDAAPI cuMemImportFromShareableHandle(CUmemGenericAllocationHandle *handle, void *osHandle, CUmemAllocationHandleType shHandleType); + +/** +* \brief Calculates either the minimal or recommended granularity +* +* Calculates either the minimal or recommended granularity +* for a given allocation specification and returns it in granularity. This +* granularity can be used as a multiple for alignment, size, or address mapping. +* +* \param[out] granularity Returned granularity. +* \param[in] prop Property for which to determine the granularity for +* \param[in] option Determines which granularity to return +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemCreate, ::cuMemMap +*/ +CUresult CUDAAPI cuMemGetAllocationGranularity(size_t *granularity, const CUmemAllocationProp *prop, CUmemAllocationGranularity_flags option); + +/** +* \brief Retrieve the contents of the property structure defining properties for this handle +* +* \param[out] prop - Pointer to a properties structure which will hold the information about this handle +* \param[in] handle - Handle which to perform the query on +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemCreate, ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemGetAllocationPropertiesFromHandle(CUmemAllocationProp *prop, CUmemGenericAllocationHandle handle); + +/** +* \brief Given an address \p addr, returns the allocation handle of the backing memory allocation. +* +* The handle is guaranteed to be the same handle value used to map the memory. If the address +* requested is not mapped, the function will fail. The returned handle must be released with +* corresponding number of calls to ::cuMemRelease. +* +* \note The address \p addr, can be any address in a range previously mapped +* by ::cuMemMap, and not necessarily the start address. +* +* \param[out] handle CUDA Memory handle for the backing memory allocation. +* \param[in] addr Memory address to query, that has been mapped previously. 
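/*
 * Sketch of sharing a physical allocation between processes through a POSIX
 * file descriptor (illustrative): the allocation is assumed to have been
 * created with ::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR in
 * CUmemAllocationProp::requestedHandleTypes, and the fd is assumed to be sent
 * to the peer process over an existing IPC channel (e.g. a Unix domain socket).
 *
 * \code
   #include <stdint.h>   // uintptr_t

   // Exporting side: turn a CUmemGenericAllocationHandle into an OS handle.
   CUresult export_fd(int *fd_out, CUmemGenericAllocationHandle handle)
   {
       return cuMemExportToShareableHandle(fd_out, handle,
                                           CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);
   }

   // Importing side: recover a CUDA handle from the received descriptor, then
   // map it with cuMemAddressReserve / cuMemMap / cuMemSetAccess as usual.
   CUresult import_fd(CUmemGenericAllocationHandle *handle_out, int fd)
   {
       return cuMemImportFromShareableHandle(handle_out, (void *)(uintptr_t)fd,
                                             CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR);
   }
 * \endcode
 */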
+* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap +*/ +CUresult CUDAAPI cuMemRetainAllocationHandle(CUmemGenericAllocationHandle *handle, void *addr); + +/** @} */ /* END CUDA_VA */ + +/** + * \defgroup CUDA_MALLOC_ASYNC Stream Ordered Memory Allocator + * + * ___MANBRIEF___ Functions for performing allocation and free operations in stream order. + * Functions for controlling the behavior of the underlying allocator. + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream ordered memory allocator exposed by the + * low-level CUDA driver application programming interface. + * + * @{ + * + * \section CUDA_MALLOC_ASYNC_overview overview + * + * The asynchronous allocator allows the user to allocate and free in stream order. + * All asynchronous accesses of the allocation must happen between + * the stream executions of the allocation and the free. If the memory is accessed + * outside of the promised stream order, a use before allocation / use after free error + * will cause undefined behavior. + * + * The allocator is free to reallocate the memory as long as it can guarantee + * that compliant memory accesses will not overlap temporally. + * The allocator may refer to internal stream ordering as well as inter-stream dependencies + * (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. + * The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + * + * \section CUDA_MALLOC_ASYNC_support Supported Platforms + * + * Whether or not a device supports the integrated stream ordered memory allocator + * may be queried by calling ::cuDeviceGetAttribute() with the device attribute + * ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED + */ + +/** + * \brief Frees memory with stream ordered semantics + * + * Inserts a free operation into \p hStream. + * The allocation must not be accessed after stream execution reaches the free. + * After this API returns, accessing the memory from any subsequent work launched on the GPU + * or querying its pointer attributes results in undefined behavior. + * + * \note During stream capture, this function results in the creation of a free node and + * must therefore be passed the address of a graph allocation. + * + * \param dptr - memory to free + * \param hStream - The stream establishing the stream ordering contract. + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + * ::CUDA_ERROR_NOT_SUPPORTED + */ +CUresult CUDAAPI cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream); + +/** + * \brief Allocates memory with stream ordered semantics + * + * Inserts an allocation operation into \p hStream. + * A pointer to the allocated memory is returned immediately in *dptr. + * The allocation must not be accessed until the the allocation operation completes. + * The allocation comes from the memory pool current to the stream's device. + * + * \note The default memory pool of a device contains device memory from that device. + * \note Basic stream ordering allows future work submitted into the same stream to use the allocation. 
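/*
 * Minimal stream-ordered allocation sketch (illustrative): the allocation, the
 * work that uses it, and the free are all ordered by the same stream, so no
 * extra synchronization is needed between them; the launch is a placeholder
 * for whatever consumes the buffer.
 *
 * \code
   CUresult scratch_in_stream(CUstream stream, size_t bytes)
   {
       CUdeviceptr scratch;
       CUresult status;

       status = cuMemAllocAsync(&scratch, bytes, stream);
       if (status != CUDA_SUCCESS) return status;

       // Work launched into 'stream' after the allocation may use 'scratch'.
       // e.g. launch_work(stream, scratch, bytes);   (placeholder)

       // The free is also stream-ordered: it takes effect only after the
       // preceding work in 'stream' has finished with the allocation.
       status = cuMemFreeAsync(scratch, stream);
       if (status != CUDA_SUCCESS) return status;

       // Synchronize so the host knows both operations have completed.
       return cuStreamSynchronize(stream);
   }
 * \endcode
 */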
+ * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + * operation completes before work submitted in a separate stream runs. + * \note During stream capture, this function results in the creation of an allocation node. In this case, + * the allocation is owned by the graph instead of the memory pool. The memory pool's properties + * are used to set the node's creation parameters. + * + * \param[out] dptr - Returned device pointer + * \param[in] bytesize - Number of bytes to allocate + * \param[in] hStream - The stream establishing the stream ordering contract and the memory pool to allocate from + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemAllocFromPoolAsync, ::cuMemFreeAsync, ::cuDeviceSetMemPool, + * ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, + * ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute + */ +CUresult CUDAAPI cuMemAllocAsync(CUdeviceptr *dptr, size_t bytesize, CUstream hStream); + +/** + * \brief Tries to release memory back to the OS + * + * Releases memory back to the OS until the pool contains fewer than minBytesToKeep + * reserved bytes, or there is no more memory that the allocator can safely release. + * The allocator cannot release OS allocations that back outstanding asynchronous allocations. + * The OS allocations may happen at different granularity from the user allocations. + * + * \note: Allocations that have not been freed count as outstanding. + * \note: Allocations that have been asynchronously freed but whose completion has + * not been observed on the host (eg. by a synchronize) can count as outstanding. + * + * \param[in] pool - The memory pool to trim + * \param[in] minBytesToKeep - If the pool has less than minBytesToKeep reserved, + * the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have + * at least minBytesToKeep bytes reserved after the operation. + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolTrimTo(CUmemoryPool pool, size_t minBytesToKeep); + +/** + * \brief Sets attributes of a memory pool + * + * Supported attributes are: + * - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. 
(default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cuMemFreeAsync (default enabled). + * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of backing memory that was + * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + * - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of used memory that was + * allocated for the memory pool. + * + * \param[in] pool - The memory pool to modify + * \param[in] attr - The attribute to modify + * \param[in] value - Pointer to the value to assign + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolSetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void *value); + +/** + * \brief Gets attributes of a memory pool + * + * Supported attributes are: + * - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cuMemFreeAsync (default enabled). + * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: (value type = cuuint64_t) + * Amount of backing memory currently allocated for the mempool + * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) + * High watermark of backing memory allocated for the mempool since the + * last time it was reset. + * - ::CU_MEMPOOL_ATTR_USED_MEM_CURRENT: (value type = cuuint64_t) + * Amount of memory from the pool that is currently in use by the application. + * - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) + * High watermark of the amount of memory from the pool that was in use by the application. 
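/*
 * Small sketch of tuning and inspecting the default memory pool of a device
 * (illustrative): raise the release threshold so the pool keeps up to 64 MiB
 * cached between synchronizations, then read back the used-memory high
 * watermark. The threshold value is an example choice.
 *
 * \code
   CUresult tune_default_pool(CUdevice dev)
   {
       CUmemoryPool pool;
       cuuint64_t threshold = (cuuint64_t)64 * 1024 * 1024;   // keep up to 64 MiB cached
       cuuint64_t used_high = 0;
       CUresult status;

       status = cuDeviceGetMemPool(&pool, dev);
       if (status != CUDA_SUCCESS) return status;

       // With a non-zero threshold the pool only releases memory to the OS
       // once it holds more than this many reserved bytes.
       status = cuMemPoolSetAttribute(pool, CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, &threshold);
       if (status != CUDA_SUCCESS) return status;

       // High watermark of pool memory that was in use by the application.
       return cuMemPoolGetAttribute(pool, CU_MEMPOOL_ATTR_USED_MEM_HIGH, &used_high);
   }
 * \endcode
 */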
+ * + * \param[in] pool - The memory pool to get attributes of + * \param[in] attr - The attribute to get + * \param[out] value - Retrieved value + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolGetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void *value); + +/** + * \brief Controls visibility of pools between devices + * + * \param[in] pool - The pool being modified + * \param[in] map - Array of access descriptors. Each descriptor instructs the access to enable for a single gpu. + * \param[in] count - Number of descriptors in the map array. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolSetAccess(CUmemoryPool pool, const CUmemAccessDesc *map, size_t count); + +/** + * \brief Returns the accessibility of a pool from a device + * + * Returns the accessibility of the pool's memory from the specified location. + * + * \param[out] flags - the accessibility of the pool from the specified location + * \param[in] memPool - the pool being queried + * \param[in] location - the location accessing the pool + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolGetAccess(CUmemAccess_flags *flags, CUmemoryPool memPool, CUmemLocation *location); + +/** + * \brief Creates a memory pool + * + * Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines + * the properties of the pool such as the backing device and IPC capabilities. + * + * By default, the pool's memory will be accessible from the device it is allocated on. + * + * \note Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, ::cuDeviceGetDefaultMemPool, + * ::cuMemAllocFromPoolAsync, ::cuMemPoolExportToShareableHandle + */ +CUresult CUDAAPI cuMemPoolCreate(CUmemoryPool *pool, const CUmemPoolProps *poolProps); + +/** + * \brief Destroys the specified memory pool + * + * If any pointers obtained from this pool haven't been freed or + * the pool has free operations that haven't completed + * when ::cuMemPoolDestroy is invoked, the function will return immediately and the + * resources associated with the pool will be released automatically + * once there are no more outstanding allocations. + * + * Destroying the current mempool of a device sets the default mempool of + * that device as the current mempool for that device. + * + * \note A device's default memory pool cannot be destroyed. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemFreeAsync, ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, + * ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolDestroy(CUmemoryPool pool); + +/** + * \brief Allocates memory from a specified pool with stream ordered semantics. + * + * Inserts an allocation operation into \p hStream. + * A pointer to the allocated memory is returned immediately in *dptr. 
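/*
 * Illustrative sketch of creating an explicit, non-IPC pool on one device and
 * allocating from it in stream order. The handle type ::CU_MEM_HANDLE_TYPE_NONE
 * and the immediate destroy at the end are example choices; a destroyed pool's
 * resources are released once all outstanding allocations have been freed.
 *
 * \code
   CUresult pool_alloc_demo(CUdeviceptr *dptr_out, size_t bytes, int dev_ordinal, CUstream stream)
   {
       CUmemPoolProps props = {0};
       CUmemoryPool pool;
       CUresult status;

       props.allocType     = CU_MEM_ALLOCATION_TYPE_PINNED;
       props.handleTypes   = CU_MEM_HANDLE_TYPE_NONE;       // no IPC for this pool
       props.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
       props.location.id   = dev_ordinal;

       status = cuMemPoolCreate(&pool, &props);
       if (status != CUDA_SUCCESS) return status;

       // The allocation is ordered in 'stream' and drawn from 'pool'.
       status = cuMemAllocFromPoolAsync(dptr_out, bytes, pool, stream);
       if (status != CUDA_SUCCESS) { cuMemPoolDestroy(pool); return status; }

       // ... use *dptr_out in 'stream', then cuMemFreeAsync(*dptr_out, stream) ...

       return cuMemPoolDestroy(pool);
   }
 * \endcode
 */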
+ * The allocation must not be accessed until the the allocation operation completes. + * The allocation comes from the specified memory pool. + * + * \note + * - The specified memory pool may be from a device different than that of the specified \p hStream. + * + * - Basic stream ordering allows future work submitted into the same stream to use the allocation. + * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + * operation completes before work submitted in a separate stream runs. + * + * \note During stream capture, this function results in the creation of an allocation node. In this case, + * the allocation is owned by the graph instead of the memory pool. The memory pool's properties + * are used to set the node's creation parameters. + * + * \param[out] dptr - Returned device pointer + * \param[in] bytesize - Number of bytes to allocate + * \param[in] pool - The pool to allocate from + * \param[in] hStream - The stream establishing the stream ordering semantic + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolSetAccess, + * ::cuMemPoolSetAttribute + */ +CUresult CUDAAPI cuMemAllocFromPoolAsync(CUdeviceptr *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); + +/** + * \brief Exports a memory pool to the requested handle type. + * + * Given an IPC capable mempool, create an OS handle to share the pool with another process. + * A recipient process can convert the shareable handle into a mempool with ::cuMemPoolImportFromShareableHandle. + * Individual pointers can then be shared with the ::cuMemPoolExportPointer and ::cuMemPoolImportPointer APIs. + * The implementation of what the shareable handle is and how it can be transferred is defined by the requested + * handle type. + * + * \note: To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE. + * + * \param[out] handle_out - Returned OS handle + * \param[in] pool - pool to export + * \param[in] handleType - the type of handle to create + * \param[in] flags - must be 0 + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer, + * ::cuMemPoolImportPointer, ::cuMemAllocAsync, ::cuMemFreeAsync, + * ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, + * ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute + */ +CUresult CUDAAPI cuMemPoolExportToShareableHandle(void *handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags); + +/** + * \brief imports a memory pool from a shared handle. + * + * Specific allocations can be imported from the imported pool with cuMemPoolImportPointer. + * + * \note Imported memory pools do not support creating new allocations. + * As such imported memory pools may not be used in cuDeviceSetMemPool + * or ::cuMemAllocFromPoolAsync calls. 
+ * + * \param[out] pool_out - Returned memory pool + * \param[in] handle - OS handle of the pool to open + * \param[in] handleType - The type of handle being imported + * \param[in] flags - must be 0 + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolExportPointer, ::cuMemPoolImportPointer + */ +CUresult CUDAAPI cuMemPoolImportFromShareableHandle( + CUmemoryPool *pool_out, + void *handle, + CUmemAllocationHandleType handleType, + unsigned long long flags); + +/** + * \brief Export data to share a memory pool allocation between processes. + * + * Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool. + * The recipient process can import the allocation with the ::cuMemPoolImportPointer api. + * The data is not a handle and may be shared through any IPC mechanism. + * + * \param[out] shareData_out - Returned export data + * \param[in] ptr - pointer to memory being exported + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolImportPointer + */ +CUresult CUDAAPI cuMemPoolExportPointer(CUmemPoolPtrExportData *shareData_out, CUdeviceptr ptr); + +/** + * \brief Import a memory pool allocation from another process. + * + * Returns in \p ptr_out a pointer to the imported memory. + * The imported memory must not be accessed before the allocation operation completes + * in the exporting process. The imported memory must be freed from all importing processes before + * being freed in the exporting process. The pointer may be freed with cuMemFree + * or cuMemFreeAsync. If cuMemFreeAsync is used, the free must be completed + * on the importing process before the free operation on the exporting process. + * + * \note The cuMemFreeAsync api may be used in the exporting process before + * the cuMemFreeAsync operation completes in its stream as long as the + * cuMemFreeAsync in the exporting process specifies a stream with + * a stream dependency on the importing process's cuMemFreeAsync. + * + * \param[out] ptr_out - pointer to imported memory + * \param[in] pool - pool from which to import + * \param[in] shareData - data specifying the memory to import + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer + */ +CUresult CUDAAPI cuMemPoolImportPointer(CUdeviceptr *ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData *shareData); + +/** @} */ /* END CUDA_MALLOC_ASYNC */ + +/** + * \defgroup CUDA_MULTICAST Multicast Object Management + * + * ___MANBRIEF___ Functions for creating multicast objects, adding devices to them and binding/unbinding memory + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the CUDA multicast object operations exposed by the + * low-level CUDA driver application programming interface. + * + * @{ + * + * \section CUDA_MULTICAST_overview overview + * + * A multicast object created via ::cuMulticastCreate enables certain memory + * operations to be broadcasted to a team of devices. Devices can be added to a + * multicast object via ::cuMulticastAddDevice. 
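/*
 * Sketch of sharing a pool allocation across processes (illustrative): assumes
 * the pool was created with ::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR in
 * CUmemPoolProps::handleTypes and that the file descriptor plus the
 * ::CUmemPoolPtrExportData blob travel over an existing IPC channel.
 *
 * \code
   #include <stdint.h>   // uintptr_t

   // Exporting process: share the pool itself, then one allocation from it.
   CUresult export_pool_ptr(int *fd_out, CUmemPoolPtrExportData *data_out,
                            CUmemoryPool pool, CUdeviceptr ptr)
   {
       CUresult status = cuMemPoolExportToShareableHandle(
           fd_out, pool, CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);
       if (status != CUDA_SUCCESS) return status;
       // 'ptr' must have been allocated from 'pool' (e.g. cuMemAllocFromPoolAsync).
       return cuMemPoolExportPointer(data_out, ptr);
   }

   // Importing process: open the pool from the fd, then import the pointer.
   CUresult import_pool_ptr(CUdeviceptr *ptr_out, int fd, CUmemPoolPtrExportData *data)
   {
       CUmemoryPool pool;
       CUresult status = cuMemPoolImportFromShareableHandle(
           &pool, (void *)(uintptr_t)fd, CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);
       if (status != CUDA_SUCCESS) return status;
       return cuMemPoolImportPointer(ptr_out, pool, data);
   }
 * \endcode
 */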
Memory can be bound on each + * participating device via either ::cuMulticastBindMem or ::cuMulticastBindAddr. + * Multicast objects can be mapped into a device's virtual address space using + * the virtual memmory management APIs (see ::cuMemMap and ::cuMemSetAccess). + * + * \section CUDA_MULTICAST_support Supported Platforms + * + * Support for multicast on a specific device can be queried using the device + * attribute ::CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED + */ + +/** + * \brief Create a generic allocation handle representing a multicast object described by the given properties. + * + * This creates a multicast object as described by \p prop. The number of + * participating devices is specified by ::CUmulticastObjectProp::numDevices. + * Devices can be added to the multicast object via ::cuMulticastAddDevice. + * All participating devices must be added to the multicast object before memory + * can be bound to it. Memory is bound to the multicast object via either + * ::cuMulticastBindMem or ::cuMulticastBindAddr, and can be unbound via + * ::cuMulticastUnbind. The total amount of memory that can be bound per device + * is specified by :CUmulticastObjectProp::size. This size must be a multiple of + * the value returned by ::cuMulticastGetGranularity with the flag + * ::CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, the size + * should be aligned to the value returned by ::cuMulticastGetGranularity with + * the flag ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + * + * After all participating devices have been added, multicast objects can also + * be mapped to a device's virtual address space using the virtual memory + * management APIs (see ::cuMemMap and ::cuMemSetAccess). Multicast objects can + * also be shared with other processes by requesting a shareable handle via + * ::cuMemExportToShareableHandle. Note that the desired types of shareable + * handles must be specified in the bitmask ::CUmulticastObjectProp::handleTypes. + * Multicast objects can be released using the virtual memory management API + * ::cuMemRelease. + * + * \param[out] mcHandle Value of handle returned. + * \param[in] prop Properties of the multicast object to create. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuMulticastAddDevice, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind + * \sa ::cuMemCreate, ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle + */ +CUresult CUDAAPI cuMulticastCreate(CUmemGenericAllocationHandle *mcHandle, const CUmulticastObjectProp *prop); + +/** + * \brief Associate a device to a multicast object. + * + * Associates a device to a multicast object. The added device will be a part of + * the multicast team of size specified by CUmulticastObjectProp::numDevices + * during ::cuMulticastCreate. + * The association of the device to the multicast object is permanent during + * the life time of the multicast object. + * All devices must be added to the multicast team before any memory can be + * bound to any device in the team. Any calls to ::cuMulticastBindMem or + * ::cuMulticastBindAddr will block until all devices have been added. + * Similarly all devices must be added to the multicast team before a virtual + * address range can be mapped to the multicast object. 
+ * A call to ::cuMemMap
+ * will block until all devices have been added.
+ *
+ * \param[in] mcHandle     Handle representing a multicast object.
+ * \param[in] dev          Device that will be associated to the multicast
+ *                         object.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_PERMITTED,
+ * ::CUDA_ERROR_NOT_SUPPORTED
+ *
+ * \sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr
+ */
+CUresult CUDAAPI cuMulticastAddDevice(CUmemGenericAllocationHandle mcHandle, CUdevice dev);
+
+/**
+ * \brief Bind a memory allocation represented by a handle to a multicast object.
+ *
+ * Binds a memory allocation specified by \p memHandle and created via
+ * ::cuMemCreate to a multicast object represented by \p mcHandle and created
+ * via ::cuMulticastCreate. The intended \p size of the bind, the offset in the
+ * multicast range \p mcOffset as well as the offset in the memory \p memOffset
+ * must be a multiple of the value returned by ::cuMulticastGetGranularity with
+ * the flag ::CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however,
+ * \p size, \p mcOffset and \p memOffset should be aligned to the granularity of
+ * the memory allocation (see ::cuMemGetAllocationGranularity) or to the value
+ * returned by ::cuMulticastGetGranularity with the flag
+ * ::CU_MULTICAST_GRANULARITY_RECOMMENDED.
+ *
+ * The \p size + \p memOffset must be smaller than the size of the allocated
+ * memory. Similarly the \p size + \p mcOffset must be smaller than the size
+ * of the multicast object.
+ * The memory allocation must have been created on one of the devices
+ * that was added to the multicast team via ::cuMulticastAddDevice.
+ * Externally shareable as well as imported multicast objects can be bound only
+ * to externally shareable memory.
+ * Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are
+ * insufficient resources required to perform the bind. This call may also
+ * return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not
+ * initialized or running.
+ *
+ * \param[in] mcHandle     Handle representing a multicast object.
+ * \param[in] mcOffset     Offset into the multicast object for attachment.
+ * \param[in] memHandle    Handle representing a memory allocation.
+ * \param[in] memOffset    Offset into the memory for attachment.
+ * \param[in] size         Size of the memory that will be bound to the
+ *                         multicast object.
+ * \param[in] flags        Flags for future use, must be zero for now.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_PERMITTED,
+ * ::CUDA_ERROR_NOT_SUPPORTED,
+ * ::CUDA_ERROR_OUT_OF_MEMORY,
+ * ::CUDA_ERROR_SYSTEM_NOT_READY
+ *
+ * \sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate
+ */
+CUresult CUDAAPI cuMulticastBindMem(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUmemGenericAllocationHandle memHandle, size_t memOffset, size_t size, unsigned long long flags);
+
+/**
+ * \brief Bind a memory allocation represented by a virtual address to a multicast object.
+ *
+ * Binds a memory allocation specified by its mapped address \p memptr to a
+ * multicast object represented by \p mcHandle.
+ * The memory must have been allocated via ::cuMemCreate or ::cudaMallocAsync.
+ * The intended \p size of the bind, the offset in the multicast range
+ * \p mcOffset and \p memptr must be a multiple of the value returned by
+ * ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_GRANULARITY_MINIMUM.
+ * For best performance however, \p size, \p mcOffset and \p memptr should be
+ * aligned to the value returned by ::cuMulticastGetGranularity with the flag
+ * ::CU_MULTICAST_GRANULARITY_RECOMMENDED.
+ *
+ * The \p size must be smaller than the size of the allocated memory.
+ * Similarly the \p size + \p mcOffset must be smaller than the total size
+ * of the multicast object.
+ * The memory allocation must have been created on one of the devices
+ * that was added to the multicast team via ::cuMulticastAddDevice.
+ * Externally shareable as well as imported multicast objects can be bound only
+ * to externally shareable memory.
+ * Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are
+ * insufficient resources required to perform the bind. This call may also
+ * return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not
+ * initialized or running.
+ *
+ * \param[in] mcHandle     Handle representing a multicast object.
+ * \param[in] mcOffset     Offset into multicast va range for attachment.
+ * \param[in] memptr       Virtual address of the memory allocation.
+ * \param[in] size         Size of memory that will be bound to the
+ *                         multicast object.
+ * \param[in] flags        Flags for future use, must be zero for now.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_PERMITTED,
+ * ::CUDA_ERROR_NOT_SUPPORTED,
+ * ::CUDA_ERROR_OUT_OF_MEMORY,
+ * ::CUDA_ERROR_SYSTEM_NOT_READY
+ *
+ * \sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate
+ */
+CUresult CUDAAPI cuMulticastBindAddr(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUdeviceptr memptr, size_t size, unsigned long long flags);
+
+/**
+ * \brief Unbind any memory allocations bound to a multicast object at a given offset and up to a given size.
+ *
+ * Unbinds any memory allocations hosted on \p dev and bound to a multicast
+ * object at \p mcOffset and up to a given \p size.
+ * The intended \p size of the unbind and the offset in the multicast range
+ * (\p mcOffset) must be a multiple of the value returned by
+ * ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_GRANULARITY_MINIMUM.
+ * The \p size + \p mcOffset must be smaller than the total size of the
+ * multicast object.
+ *
+ * \note
+ * Warning:
+ * The \p mcOffset and the \p size must match the corresponding values specified
+ * during the bind call. Any other values may result in undefined behavior.
+ *
+ * \param[in] mcHandle     Handle representing a multicast object.
+ * \param[in] dev          Device that hosts the memory allocation.
+ * \param[in] mcOffset     Offset into the multicast object.
+ * \param[in] size         Desired size to unbind.
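+ *
+ * As a rough, illustrative sketch of the bind/unbind flow described above (not
+ * part of the official documentation; it continues the creation sketch earlier in
+ * this section, reusing mcHandle, mcProp and dev0, assumes mcProp.size also meets
+ * the allocation granularity, and omits error checking):
+ * \code
+   CUmemAllocationProp allocProp = {0};
+   CUmemGenericAllocationHandle memHandle;
+
+   allocProp.type          = CU_MEM_ALLOCATION_TYPE_PINNED;
+   allocProp.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
+   allocProp.location.id   = dev0;          // a device already added to the team
+   cuMemCreate(&memHandle, mcProp.size, &allocProp, 0);
+
+   cuMulticastBindMem(mcHandle, 0 /*mcOffset*/, memHandle, 0 /*memOffset*/, mcProp.size, 0);
+   ...
+   cuMulticastUnbind(mcHandle, dev0, 0 /*mcOffset*/, mcProp.size);
+ * \endcode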
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuMulticastBindMem, ::cuMulticastBindAddr + */ +CUresult CUDAAPI cuMulticastUnbind(CUmemGenericAllocationHandle mcHandle, CUdevice dev, size_t mcOffset, size_t size); + +/** +* \brief Calculates either the minimal or recommended granularity for multicast object +* +* Calculates either the minimal or recommended granularity for a given set of +* multicast object properties and returns it in granularity. This granularity +* can be used as a multiple for size, bind offsets and address mappings of the +* multicast object. +* +* \param[out] granularity Returned granularity. +* \param[in] prop Properties of the multicast object. +* \param[in] option Determines which granularity to return. +* +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind +*/ +CUresult CUDAAPI cuMulticastGetGranularity(size_t *granularity, const CUmulticastObjectProp *prop, CUmulticastGranularity_flags option); + +/** @} */ /* END CUDA_MULTICAST */ + +/** + * \defgroup CUDA_UNIFIED Unified Addressing + * + * ___MANBRIEF___ unified addressing functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the unified addressing functions of the + * low-level CUDA driver application programming interface. + * + * @{ + * + * \section CUDA_UNIFIED_overview Overview + * + * CUDA devices can share a unified address space with the host. + * For these devices there is no distinction between a device + * pointer and a host pointer -- the same pointer value may be + * used to access memory from the host program and from a kernel + * running on the device (with exceptions enumerated below). + * + * \section CUDA_UNIFIED_support Supported Platforms + * + * Whether or not a device supports unified addressing may be + * queried by calling ::cuDeviceGetAttribute() with the device + * attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. + * + * Unified addressing is automatically enabled in 64-bit processes + * + * \section CUDA_UNIFIED_lookup Looking Up Information from Pointer Values + * + * It is possible to look up information about the memory which backs a + * pointer value. For instance, one may want to know if a pointer points + * to host or device memory. As another example, in the case of device + * memory, one may want to know on which CUDA device the memory + * resides. These properties may be queried using the function + * ::cuPointerGetAttribute() + * + * Since pointers are unique, it is not necessary to specify information + * about the pointers specified to the various copy functions in the + * CUDA API. The function ::cuMemcpy() may be used to perform a copy + * between two pointers, ignoring whether they point to host or device + * memory (making ::cuMemcpyHtoD(), ::cuMemcpyDtoD(), and ::cuMemcpyDtoH() + * unnecessary for devices supporting unified addressing). For + * multidimensional copies, the memory type ::CU_MEMORYTYPE_UNIFIED may be + * used to specify that the CUDA driver should infer the location of the + * pointer from its value. 
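+ *
+ * Purely for illustration (not part of the original text), a sketch of querying a
+ * pointer and copying with ::cuMemcpy under unified addressing; srcPtr and dstPtr
+ * are assumed to be valid ::CUdeviceptr values referencing numBytes bytes each:
+ * \code
+   unsigned int memType = 0;
+
+   // The same query works for host and device pointers under unified addressing.
+   cuPointerGetAttribute(&memType, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, srcPtr);
+   if (memType == CU_MEMORYTYPE_DEVICE) {
+       // srcPtr refers to device memory.
+   }
+
+   // No need to pick cuMemcpyHtoD/DtoH/DtoD explicitly; the driver infers the
+   // direction from the pointer values.
+   cuMemcpy(dstPtr, srcPtr, numBytes);
+ * \endcode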
+ * + * \section CUDA_UNIFIED_automaphost Automatic Mapping of Host Allocated Host Memory + * + * All host memory allocated in all contexts using ::cuMemAllocHost() and + * ::cuMemHostAlloc() is always directly accessible from all contexts on + * all devices that support unified addressing. This is the case regardless + * of whether or not the flags ::CU_MEMHOSTALLOC_PORTABLE and + * ::CU_MEMHOSTALLOC_DEVICEMAP are specified. + * + * The pointer value through which allocated host memory may be accessed + * in kernels on all devices that support unified addressing is the same + * as the pointer value through which that memory is accessed on the host, + * so it is not necessary to call ::cuMemHostGetDevicePointer() to get the device + * pointer for these allocations. + * + * Note that this is not the case for memory allocated using the flag + * ::CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. + * + * \section CUDA_UNIFIED_autopeerregister Automatic Registration of Peer Memory + * + * Upon enabling direct access from a context that supports unified addressing + * to another peer context that supports unified addressing using + * ::cuCtxEnablePeerAccess() all memory allocated in the peer context using + * ::cuMemAlloc() and ::cuMemAllocPitch() will immediately be accessible + * by the current context. The device pointer value through + * which any peer memory may be accessed in the current context + * is the same pointer value through which that memory may be + * accessed in the peer context. + * + * \section CUDA_UNIFIED_exceptions Exceptions, Disjoint Addressing + * + * Not all memory may be accessed on devices through the same pointer + * value through which they are accessed on the host. These exceptions + * are host memory registered using ::cuMemHostRegister() and host memory + * allocated using the flag ::CU_MEMHOSTALLOC_WRITECOMBINED. For these + * exceptions, there exists a distinct host and device address for the + * memory. The device address is guaranteed to not overlap any valid host + * pointer range and is guaranteed to have the same value across all + * contexts that support unified addressing. + * + * This device address may be queried using ::cuMemHostGetDevicePointer() + * when a context using unified addressing is current. Either the host + * or the unified device pointer value may be used to refer to this memory + * through ::cuMemcpy() and similar functions using the + * ::CU_MEMORYTYPE_UNIFIED memory type. + * + */ + +/** + * \brief Returns information about a pointer + * + * The supported attributes are: + * + * - ::CU_POINTER_ATTRIBUTE_CONTEXT: + * + * Returns in \p *data the ::CUcontext in which \p ptr was allocated or + * registered. + * The type of \p data must be ::CUcontext *. + * + * If \p ptr was not allocated by, mapped by, or registered with + * a ::CUcontext which uses unified virtual addressing then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE: + * + * Returns in \p *data the physical memory type of the memory that + * \p ptr addresses as a ::CUmemorytype enumerated value. + * The type of \p data must be unsigned int. + * + * If \p ptr addresses device memory then \p *data is set to + * ::CU_MEMORYTYPE_DEVICE. The particular ::CUdevice on which the + * memory resides is the ::CUdevice of the ::CUcontext returned by the + * ::CU_POINTER_ATTRIBUTE_CONTEXT attribute of \p ptr. + * + * If \p ptr addresses host memory then \p *data is set to + * ::CU_MEMORYTYPE_HOST. 
+ * + * If \p ptr was not allocated by, mapped by, or registered with + * a ::CUcontext which uses unified virtual addressing then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * If the current ::CUcontext does not support unified virtual + * addressing then ::CUDA_ERROR_INVALID_CONTEXT is returned. + * + * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER: + * + * Returns in \p *data the device pointer value through which + * \p ptr may be accessed by kernels running in the current + * ::CUcontext. + * The type of \p data must be CUdeviceptr *. + * + * If there exists no device pointer value through which + * kernels running in the current ::CUcontext may access + * \p ptr then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * If there is no current ::CUcontext then + * ::CUDA_ERROR_INVALID_CONTEXT is returned. + * + * Except in the exceptional disjoint addressing cases discussed + * below, the value returned in \p *data will equal the input + * value \p ptr. + * + * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER: + * + * Returns in \p *data the host pointer value through which + * \p ptr may be accessed by by the host program. + * The type of \p data must be void **. + * If there exists no host pointer value through which + * the host program may directly access \p ptr then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * Except in the exceptional disjoint addressing cases discussed + * below, the value returned in \p *data will equal the input + * value \p ptr. + * + * - ::CU_POINTER_ATTRIBUTE_P2P_TOKENS: + * + * Returns in \p *data two tokens for use with the nv-p2p.h Linux + * kernel interface. \p data must be a struct of type + * CUDA_POINTER_ATTRIBUTE_P2P_TOKENS. + * + * \p ptr must be a pointer to memory obtained from :cuMemAlloc(). + * Note that p2pToken and vaSpaceToken are only valid for the + * lifetime of the source allocation. A subsequent allocation at + * the same address may return completely different tokens. + * Querying this attribute has a side effect of setting the attribute + * ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that + * \p ptr points to. + * + * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: + * + * A boolean attribute which when set, ensures that synchronous memory operations + * initiated on the region of memory that \p ptr points to will always synchronize. + * See further documentation in the section titled "API synchronization behavior" + * to learn more about cases when synchronous memory operations can + * exhibit asynchronous behavior. + * + * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID: + * + * Returns in \p *data a buffer ID which is guaranteed to be unique within the process. + * \p data must point to an unsigned long long. + * + * \p ptr must be a pointer to memory obtained from a CUDA memory allocation API. + * Every memory allocation from any of the CUDA memory allocation APIs will + * have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs + * from previous freed allocations. IDs are only unique within a single process. + * + * + * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED: + * + * Returns in \p *data a boolean that indicates whether the pointer points to + * managed memory or not. + * + * If \p ptr is not a valid CUDA pointer then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: + * + * Returns in \p *data an integer representing a device ordinal of a device against + * which the memory was allocated or registered. 
+ * + * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: + * + * Returns in \p *data a boolean that indicates if this pointer maps to + * an allocation that is suitable for ::cudaIpcGetMemHandle. + * + * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: + * + * Returns in \p *data the starting address for the allocation referenced + * by the device pointer \p ptr. Note that this is not necessarily the + * address of the mapped region, but the address of the mappable address + * range \p ptr references (e.g. from ::cuMemAddressReserve). + * + * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE: + * + * Returns in \p *data the size for the allocation referenced by the device + * pointer \p ptr. Note that this is not necessarily the size of the mapped + * region, but the size of the mappable address range \p ptr references + * (e.g. from ::cuMemAddressReserve). To retrieve the size of the mapped + * region, see ::cuMemGetAddressRange + * + * - ::CU_POINTER_ATTRIBUTE_MAPPED: + * + * Returns in \p *data a boolean that indicates if this pointer is in a + * valid address range that is mapped to a backing allocation. + * + * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: + * + * Returns a bitmask of the allowed handle types for an allocation that may + * be passed to ::cuMemExportToShareableHandle. + * + * - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: + * + * Returns in \p *data the handle to the mempool that the allocation was obtained from. + * + * \par + * + * Note that for most allocations in the unified virtual address space + * the host and device pointer for accessing the allocation will be the + * same. The exceptions to this are + * - user memory registered using ::cuMemHostRegister + * - host memory allocated using ::cuMemHostAlloc with the + * ::CU_MEMHOSTALLOC_WRITECOMBINED flag + * For these types of allocation there will exist separate, disjoint host + * and device addresses for accessing the allocation. In particular + * - The host address will correspond to an invalid unmapped device address + * (which will result in an exception if accessed from the device) + * - The device address will correspond to an invalid unmapped host address + * (which will result in an exception if accessed from the host). + * For these types of allocations, querying ::CU_POINTER_ATTRIBUTE_HOST_POINTER + * and ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host + * and device addresses from either address. + * + * \param data - Returned pointer attribute value + * \param attribute - Pointer attribute to query + * \param ptr - Pointer + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuPointerSetAttribute, + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuMemAllocHost, + * ::cuMemFreeHost, + * ::cuMemHostAlloc, + * ::cuMemHostRegister, + * ::cuMemHostUnregister, + * ::cudaPointerGetAttributes + */ +CUresult CUDAAPI cuPointerGetAttribute(void *data, CUpointer_attribute attribute, CUdeviceptr ptr); + +/** + * \brief Prefetches memory to the specified destination device + * + * Prefetches memory to the specified destination device. \p devPtr is the + * base device pointer of the memory to be prefetched and \p dstDevice is the + * destination device. \p count specifies the number of bytes to copy. \p hStream + * is the stream in which the operation is enqueued. 
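+ *
+ * A minimal illustrative sketch (not part of the original documentation; error
+ * checking omitted, and numBytes, dev0 and hStream are assumed) of prefetching a
+ * managed allocation to a GPU and later back to the CPU:
+ * \code
+   CUdeviceptr managed;
+   cuMemAllocManaged(&managed, numBytes, CU_MEM_ATTACH_GLOBAL);
+
+   cuMemPrefetchAsync(managed, numBytes, dev0, hStream);          // migrate to dev0
+   // ... enqueue work on hStream that accesses the range ...
+   cuMemPrefetchAsync(managed, numBytes, CU_DEVICE_CPU, hStream); // migrate back to host
+   cuStreamSynchronize(hStream);
+ * \endcode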
The memory range must refer + * to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables. + * + * Passing in CU_DEVICE_CPU for \p dstDevice will prefetch the data to host memory. If + * \p dstDevice is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + * must be non-zero. Additionally, \p hStream must be associated with a device that has a + * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * + * The start address and end address of the memory range will be rounded down and rounded up + * respectively to be aligned to CPU page size before the prefetch operation is enqueued + * in the stream. + * + * If no physical memory has been allocated for this region, then this memory region + * will be populated and mapped on the destination device. If there's insufficient + * memory to prefetch the desired region, the Unified Memory driver may evict pages from other + * ::cuMemAllocManaged allocations to host memory in order to make room. Device memory + * allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted. + * + * By default, any mappings to the previous location of the migrated pages are removed and + * mappings for the new location are only setup on \p dstDevice. The exact behavior however + * also depends on the settings applied to this memory range via ::cuMemAdvise as described + * below: + * + * If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, + * then that subset will create a read-only copy of the pages on \p dstDevice. + * + * If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory + * range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the + * preferred location of any pages in the memory range. + * + * If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, + * then mappings to those pages from all the appropriate processors are updated to + * refer to the new location if establishing such a mapping is possible. Otherwise, + * those mappings are cleared. + * + * Note that this API is not required for functionality and only serves to improve performance + * by allowing the application to migrate data to a suitable location before it is accessed. + * Memory accesses to this range are always coherent and are allowed even when the data is + * actively being migrated. + * + * Note that this function is asynchronous with respect to the host and all work + * on other devices. + * + * \param devPtr - Pointer to be prefetched + * \param count - Size in bytes + * \param dstDevice - Destination device to prefetch to + * \param hStream - Stream to enqueue prefetch operation + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + * ::cuMemcpy3DPeerAsync, ::cuMemAdvise, + * ::cudaMemPrefetchAsync + */ +CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); + +/** + * \brief Advise about the usage of a given memory range + * + * Advise the Unified Memory subsystem about the usage pattern for the memory range + * starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory + * range will be rounded down and rounded up respectively to be aligned to CPU page size before the + * advice is applied. 
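+ *
+ * A small illustrative sketch (not part of the original documentation; it reuses
+ * managed, numBytes, dev0 and hStream from the prefetch sketch above) combining
+ * advice with an explicit prefetch:
+ * \code
+   // Prefer keeping the pages resident on dev0 ...
+   cuMemAdvise(managed, numBytes, CU_MEM_ADVISE_SET_PREFERRED_LOCATION, dev0);
+   // ... while keeping a CPU mapping wherever the data's location permits it.
+   cuMemAdvise(managed, numBytes, CU_MEM_ADVISE_SET_ACCESSED_BY, CU_DEVICE_CPU);
+
+   cuMemPrefetchAsync(managed, numBytes, dev0, hStream);
+ * \endcode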
The memory range must refer to managed memory allocated via ::cuMemAllocManaged + * or declared via __managed__ variables. The memory range could also refer to system-allocated pageable + * memory provided it represents a valid, host-accessible region of memory and all additional constraints + * imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable + * memory range results in an error being returned. + * + * The \p advice parameter can take the following values: + * - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read + * from and only occasionally written to. Any read accesses from any processor to this region will create a + * read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cuMemPrefetchAsync + * is called on this region, it will create a read-only copy of the data on the destination processor. + * If any processor writes to this region, all copies of the corresponding page will be invalidated + * except for the one where the write occurred. The \p device argument is ignored for this advice. + * Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU + * that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * Also, if a context is created on a device that does not have the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until + * all such contexts are destroyed. + * If the memory region refers to valid system-allocated pageable memory, then the accessing device must + * have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only + * copy to be created on that device. Note however that if the accessing device also has a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice + * will not create a read-only copy when that device accesses this memory region. + * + * - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the + * Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated + * copies of the data will be collapsed into a single copy. The location for the collapsed + * copy will be the preferred location if the page has a preferred location and one of the read-duplicated + * copies was resident at that location. Otherwise, the location chosen is arbitrary. + * + * - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the + * data to be the memory belonging to \p device. Passing in CU_DEVICE_CPU for \p device sets the + * preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location + * does not cause data to migrate to that location immediately. Instead, it guides the migration policy + * when a fault occurs on that memory region. If the data is already in its preferred location and the + * faulting processor can establish a mapping without requiring the data to be migrated, then + * data migration will be avoided. On the other hand, if the data is not in its preferred location + * or if a direct mapping cannot be established, then it will be migrated to the processor accessing + * it. 
It is important to note that setting the preferred location does not prevent data prefetching + * done using ::cuMemPrefetchAsync. + * Having a preferred location can override the page thrash detection and resolution logic in the Unified + * Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device + * memory, the page may eventually be pinned to host memory by the Unified Memory driver. But + * if the preferred location is set as device memory, then the page will continue to thrash indefinitely. + * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + * policies associated with that advice will override the policies of this advice, unless read accesses from + * \p device will not result in a read-only copy being created on that device as outlined in description for + * the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY. + * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + * then this call has no effect. Note however that this behavior may change in the future. + * + * - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION + * and changes the preferred location to none. + * + * - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by \p device. + * Passing in ::CU_DEVICE_CPU for \p device will set the advice for the CPU. If \p device is a GPU, then + * the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. + * This advice does not cause data migration and has no impact on the location of the data per se. Instead, + * it causes the data to always be mapped in the specified processor's page tables, as long as the + * location of the data permits a mapping to be established. If the data gets migrated for any reason, + * the mappings are updated accordingly. + * This advice is recommended in scenarios where data locality is not important, but avoiding faults is. + * Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the + * data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data + * over to the other GPUs is not as important because the accesses are infrequent and the overhead of + * migration may be too high. But preventing faults can still help improve performance, and so having + * a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated + * to host memory because the CPU typically cannot access device memory directly. Any GPU that had the + * ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the + * page in host memory. + * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + * policies associated with that advice will override the policies of this advice. Additionally, if the + * preferred location of this memory region or any subset of it is also \p device, then the policies + * associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. 
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + * then this call has no effect. + * + * - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to + * the data from \p device may be removed at any time causing accesses to result in non-fatal page faults. + * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + * then this call has no effect. + * + * \param devPtr - Pointer to memory to set the advice for + * \param count - Size in bytes of the memory range + * \param advice - Advice to be applied for the specified memory range + * \param device - Device to apply the advice for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + * ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, + * ::cudaMemAdvise + */ +CUresult CUDAAPI cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device); + +/** + * \brief Query an attribute of a given memory range + * + * Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The + * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via + * __managed__ variables. + * + * The \p attribute parameter can take the following values: + * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, \p data will be interpreted + * as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given + * memory range have read-duplication enabled, or 0 otherwise. + * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, \p data will be + * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device + * id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU + * if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID + * if either all the pages don't have the same preferred location or some of the pages don't have a + * preferred location at all. Note that the actual location of the pages in the memory range at the time of + * the query may be different from the preferred location. + * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, \p data will be interpreted + * as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned + * will be a list of device ids that had ::CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range. + * If any device does not have that advice set for the entire memory range, that device will not be included. + * If \p data is larger than the number of devices that have that advice set for that memory range, + * CU_DEVICE_INVALID will be returned in all the extra space provided. 
+ * For example, if \p dataSize is 12
+ * (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be
+ * { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If \p data is smaller than the number of devices that have
+ * that advice set, then only as many devices will be returned as can fit in the array. There is no
+ * guarantee on which specific devices will be returned, however.
+ * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, \p data will be
+ * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location
+ * to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. This will either be
+ * a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU
+ * respectively. If any page in the memory range was never explicitly prefetched or if all pages were not
+ * prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the
+ * last location that the application requested to prefetch the memory range to. It gives no indication as to
+ * whether the prefetch operation to that location has completed or even begun.
+ *
+ * \param data      - A pointer to a memory location where the result
+ *                    of each attribute query will be written to.
+ * \param dataSize  - Size in bytes of the memory pointed to by \p data
+ * \param attribute - The attribute to query
+ * \param devPtr    - Start of the range to query
+ * \param count     - Size of the range to query
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ *
+ * \sa ::cuMemRangeGetAttributes, ::cuMemPrefetchAsync,
+ * ::cuMemAdvise,
+ * ::cudaMemRangeGetAttribute
+ */
+CUresult CUDAAPI cuMemRangeGetAttribute(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count);
+
+/**
+ * \brief Query attributes of a given memory range.
+ *
+ * Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
+ * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via
+ * __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes
+ * entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries.
+ * The results of the query will be stored in \p data.
+ *
+ * The list of supported attributes is given below. Please refer to ::cuMemRangeGetAttribute for
+ * attribute descriptions and restrictions.
+ *
+ * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY
+ * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION
+ * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY
+ * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION
+ *
+ * \param data          - A two-dimensional array containing pointers to memory
+ *                        locations where the result of each attribute query will be written to.
+ * \param dataSizes - Array containing the sizes of each result + * \param attributes - An array of attributes to query + * (numAttributes and the number of attributes in this array should match) + * \param numAttributes - Number of attributes to query + * \param devPtr - Start of the range to query + * \param count - Size of the range to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa ::cuMemRangeGetAttribute, ::cuMemAdvise, + * ::cuMemPrefetchAsync, + * ::cudaMemRangeGetAttributes + */ +CUresult CUDAAPI cuMemRangeGetAttributes(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count); + +/** + * \brief Set attributes on a previously allocated memory region + * + * The supported attributes are: + * + * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: + * + * A boolean attribute that can either be set (1) or unset (0). When set, + * the region of memory that \p ptr points to is guaranteed to always synchronize + * memory operations that are synchronous. If there are some previously initiated + * synchronous memory operations that are pending when this attribute is set, the + * function does not return until those memory operations are complete. + * See further documentation in the section titled "API synchronization behavior" + * to learn more about cases when synchronous memory operations can + * exhibit asynchronous behavior. + * \p value will be considered as a pointer to an unsigned integer to which this attribute is to be set. + * + * \param value - Pointer to memory containing the value to be set + * \param attribute - Pointer attribute to set + * \param ptr - Pointer to a memory region allocated using CUDA memory allocation APIs + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa ::cuPointerGetAttribute, + * ::cuPointerGetAttributes, + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuMemAllocHost, + * ::cuMemFreeHost, + * ::cuMemHostAlloc, + * ::cuMemHostRegister, + * ::cuMemHostUnregister + */ +CUresult CUDAAPI cuPointerSetAttribute(const void *value, CUpointer_attribute attribute, CUdeviceptr ptr); + +/** + * \brief Returns information about a pointer. + * + * The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions): + * + * - ::CU_POINTER_ATTRIBUTE_CONTEXT + * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE + * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER + * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER + * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS + * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID + * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED + * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL + * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR + * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE + * - ::CU_POINTER_ATTRIBUTE_MAPPED + * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE + * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES + * - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE + * + * \param numAttributes - Number of attributes to query + * \param attributes - An array of attributes to query + * (numAttributes and the number of attributes in this array should match) + * \param data - A two-dimensional array containing pointers to memory + * locations where the result of each attribute query will be written to. 
+ * \param ptr - Pointer to query + * + * Unlike ::cuPointerGetAttribute, this function will not return an error when the \p ptr + * encountered is not a valid CUDA pointer. Instead, the attributes are assigned default NULL values + * and CUDA_SUCCESS is returned. + * + * If \p ptr was not allocated by, mapped by, or registered with a ::CUcontext which uses UVA + * (Unified Virtual Addressing), ::CUDA_ERROR_INVALID_CONTEXT is returned. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuPointerGetAttribute, + * ::cuPointerSetAttribute, + * ::cudaPointerGetAttributes + */ +CUresult CUDAAPI cuPointerGetAttributes(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr ptr); + +/** @} */ /* END CUDA_UNIFIED */ + +/** + * \defgroup CUDA_STREAM Stream Management + * + * ___MANBRIEF___ stream management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Create a stream + * + * Creates a stream and returns a handle in \p phStream. The \p Flags argument + * determines behaviors of the stream. + * + * Valid values for \p Flags are: + * - ::CU_STREAM_DEFAULT: Default stream creation flag. + * - ::CU_STREAM_NON_BLOCKING: Specifies that work running in the created + * stream may run concurrently with work in stream 0 (the NULL stream), and that + * the created stream should perform no implicit synchronization with stream 0. + * + * \param phStream - Returned newly created stream + * \param Flags - Parameters for stream creation + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreateWithPriority, + * ::cuStreamGetPriority, + * ::cuStreamGetFlags, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags + */ +CUresult CUDAAPI cuStreamCreate(CUstream *phStream, unsigned int Flags); + +/** + * \brief Create a stream with the given priority + * + * Creates a stream with the specified priority and returns a handle in \p phStream. + * This API alters the scheduler priority of work in the stream. Work in a higher + * priority stream may preempt work already executing in a low priority stream. + * + * \p priority follows a convention where lower numbers represent higher priorities. + * '0' represents default priority. The range of meaningful numerical priorities can + * be queried using ::cuCtxGetStreamPriorityRange. If the specified priority is + * outside the numerical range returned by ::cuCtxGetStreamPriorityRange, + * it will automatically be clamped to the lowest or the highest number in the range. + * + * \param phStream - Returned newly created stream + * \param flags - Flags for stream creation. See ::cuStreamCreate for a list of + * valid flags + * \param priority - Stream priority. Lower numbers represent higher priorities. + * See ::cuCtxGetStreamPriorityRange for more information about + * meaningful stream priorities that can be passed. 
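+ *
+ * An illustrative sketch (not part of the original documentation; error checking
+ * omitted) of creating a stream with the highest priority available in the
+ * current context:
+ * \code
+   int leastPriority = 0, greatestPriority = 0;
+   CUstream highPrioStream;
+
+   cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority);
+   // Numerically lower values mean higher priority, so greatestPriority is the
+   // highest priority the context supports.
+   cuStreamCreateWithPriority(&highPrioStream, CU_STREAM_NON_BLOCKING, greatestPriority);
+ * \endcode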
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \note Stream priorities are supported only on GPUs + * with compute capability 3.5 or higher. + * + * \note In the current implementation, only compute kernels launched in + * priority streams are affected by the stream's priority. Stream priorities have + * no effect on host-to-device and device-to-host memory operations. + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamGetPriority, + * ::cuCtxGetStreamPriorityRange, + * ::cuStreamGetFlags, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamCreateWithPriority + */ +CUresult CUDAAPI cuStreamCreateWithPriority(CUstream *phStream, unsigned int flags, int priority); + + +/** + * \brief Query the priority of a given stream + * + * Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority + * and return the priority in \p priority. Note that if the stream was created with a + * priority outside the numerical range returned by ::cuCtxGetStreamPriorityRange, + * this function returns the clamped priority. + * See ::cuStreamCreateWithPriority for details about priority clamping. + * + * \param hStream - Handle to the stream to be queried + * \param priority - Pointer to a signed integer in which the stream's priority is returned + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamCreateWithPriority, + * ::cuCtxGetStreamPriorityRange, + * ::cuStreamGetFlags, + * ::cudaStreamGetPriority + */ +CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); + +/** + * \brief Query the flags of a given stream + * + * Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority + * and return the flags in \p flags. + * + * \param hStream - Handle to the stream to be queried + * \param flags - Pointer to an unsigned integer in which the stream's flags are returned + * The value returned in \p flags is a logical 'OR' of all flags that + * were used while creating this stream. See ::cuStreamCreate for the list + * of valid flags + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamGetPriority, + * ::cudaStreamGetFlags + */ +CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); + +/** + * \brief Returns the unique Id associated with the stream handle supplied + * + * Returns in \p streamId the unique Id which is associated with the given stream handle. + * The Id is unique for the life of the program. + * + * The stream handle \p hStream can refer to any of the following: + *
    + *
+ * - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate
+ *   and ::cuStreamCreateWithPriority, or their runtime API equivalents such as
+ *   ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority.
+ *   Passing an invalid handle will result in undefined behavior.
+ * - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and
+ *   ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted,
+ *   which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively.
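+ *
+ * For example (illustrative only, not part of the original documentation):
+ * \code
+   CUstream hStream;
+   unsigned long long streamId = 0;
+
+   cuStreamCreate(&hStream, CU_STREAM_DEFAULT);
+   cuStreamGetId(hStream, &streamId);   // unique for the life of the program
+ * \endcode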
+ * + * \param hStream - Handle to the stream to be queried + * \param streamId - Pointer to store the Id of the stream + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamGetPriority, + * ::cudaStreamGetId + */ +CUresult CUDAAPI cuStreamGetId(CUstream hStream, unsigned long long *streamId); + +/** + * \brief Query the context associated with a stream + * + * Returns the CUDA context that the stream is associated with. + * + * The stream handle \p hStream can refer to any of the following: + *
    + *
+ * - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate
+ *   and ::cuStreamCreateWithPriority, or their runtime API equivalents such as
+ *   ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority.
+ *   The returned context is the context that was active in the calling thread when the
+ *   stream was created. Passing an invalid handle will result in undefined behavior.
+ * - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and
+ *   ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted,
+ *   which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively.
+ *   Specifying any of the special handles will return the context current to the
+ *   calling thread. If no context is current to the calling thread,
+ *   ::CUDA_ERROR_INVALID_CONTEXT is returned.
+ * + * \param hStream - Handle to the stream to be queried + * \param pctx - Returned context associated with the stream + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreateWithPriority, + * ::cuStreamGetPriority, + * ::cuStreamGetFlags, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags + */ +CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx); + +/** + * \brief Make a compute stream wait on an event + * + * Makes all future work submitted to \p hStream wait for all work captured in + * \p hEvent. See ::cuEventRecord() for details on what is captured by an event. + * The synchronization will be performed efficiently on the device when applicable. + * \p hEvent may be from a different context or device than \p hStream. + * + * flags include: + * - ::CU_EVENT_WAIT_DEFAULT: Default event creation flag. + * - ::CU_EVENT_WAIT_EXTERNAL: Event is captured in the graph as an external + * event node when performing stream capture. This flag is invalid outside + * of stream capture. + * + * \param hStream - Stream to wait + * \param hEvent - Event to wait on (may not be NULL) + * \param Flags - See ::CUevent_capture_flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuEventRecord, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cuStreamDestroy, + * ::cudaStreamWaitEvent + */ +CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); + +/** + * \brief Add a callback to a compute stream + * + * \note This function is slated for eventual deprecation and removal. If + * you do not require the callback to execute in case of a device error, + * consider using ::cuLaunchHostFunc. Additionally, this function is not + * supported with ::cuStreamBeginCapture and ::cuStreamEndCapture, unlike + * ::cuLaunchHostFunc. + * + * Adds a callback to be called on the host after all currently enqueued + * items in the stream have completed. For each + * cuStreamAddCallback call, the callback will be executed exactly once. + * The callback will block later work in the stream until it is finished. + * + * The callback may be passed ::CUDA_SUCCESS or an error code. In the event + * of a device error, all subsequently executed callbacks will receive an + * appropriate ::CUresult. + * + * Callbacks must not make any CUDA API calls. Attempting to use a CUDA API + * will result in ::CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any + * synchronization that may depend on outstanding device work or other callbacks + * that are not mandated to run earlier. Callbacks without a mandated order + * (in independent streams) execute in undefined order and may be serialized. + * + * For the purposes of Unified Memory, callback execution makes a number of + * guarantees: + *
    + *
+ * - The callback stream is considered idle for the duration of the
+ *   callback. Thus, for example, a callback may always use memory attached
+ *   to the callback stream.
+ * - The start of execution of a callback has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the callback. It thus synchronizes streams which have been "joined"
+ *   prior to the callback.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding host functions and stream callbacks
+ *   have executed. Thus, for example, a callback might use global attached
+ *   memory even if work has been added to another stream, if the work has
+ *   been ordered behind the callback with an event.
+ * - Completion of a callback does not cause a stream to become
+ *   active except as described above. The callback stream will remain idle
+ *   if no device work follows the callback, and will remain idle across
+ *   consecutive callbacks without device work in between. Thus, for example,
+ *   stream synchronization can be done by signaling from a callback at the
+ *   end of the stream.
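+ *
+ * A minimal illustrative sketch (not part of the original documentation) of
+ * registering a host callback; hStream is assumed to be a valid stream and the
+ * callback performs host-side work only, since CUDA API calls are not permitted:
+ * \code
+   void CUDA_CB myCallback(CUstream hStream, CUresult status, void *userData) {
+       // status is CUDA_SUCCESS or the error that occurred on the device.
+       printf("stream work finished, status = %d\n", (int)status);
+   }
+
+   ...
+   cuStreamAddCallback(hStream, myCallback, NULL /*userData*/, 0 /*flags*/);
+ * \endcode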
+ * + * \param hStream - Stream to add callback to + * \param callback - The function to call once preceding stream operations are complete + * \param userData - User specified data to be passed to the callback function + * \param flags - Reserved for future use, must be 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuMemAllocManaged, + * ::cuStreamAttachMemAsync, + * ::cuLaunchHostFunc, + * ::cudaStreamAddCallback + */ +CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); + +/** + * \brief Begins graph capture on a stream + * + * Begin graph capture on \p hStream. When a stream is in capture mode, all operations + * pushed into the stream will not be executed, but will instead be captured into + * a graph, which will be returned via ::cuStreamEndCapture. Capture may not be initiated + * if \p stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which + * it was initiated, and it may only be initiated if the stream is not already in capture + * mode. The capture mode may be queried via ::cuStreamIsCapturing. A unique id + * representing the capture sequence may be queried via ::cuStreamGetCaptureInfo. + * + * If \p mode is not ::CU_STREAM_CAPTURE_MODE_RELAXED, ::cuStreamEndCapture must be + * called on this stream from the same thread. + * + * \param hStream - Stream in which to initiate capture + * \param mode - Controls the interaction of this capture sequence with other API + * calls that are potentially unsafe. For more details see + * ::cuThreadExchangeStreamCaptureMode. + * + * \note Kernels captured using this API must not use texture and surface references. + * Reading or writing through any texture or surface reference is undefined + * behavior. This restriction does not apply to texture and surface objects. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuStreamCreate, + * ::cuStreamIsCapturing, + * ::cuStreamEndCapture, + * ::cuThreadExchangeStreamCaptureMode + */ +CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode); + +/** + * \brief Swaps the stream capture interaction mode for a thread + * + * Sets the calling thread's stream capture interaction mode to the value contained + * in \p *mode, and overwrites \p *mode with the previous mode for the thread. To + * facilitate deterministic behavior across function or module boundaries, callers + * are encouraged to use this API in a push-pop fashion: \code + CUstreamCaptureMode mode = desiredMode; + cuThreadExchangeStreamCaptureMode(&mode); + ... + cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode + * \endcode + * + * During stream capture (see ::cuStreamBeginCapture), some actions, such as a call + * to ::cudaMalloc, may be unsafe. In the case of ::cudaMalloc, the operation is + * not enqueued asynchronously to a stream, and is not observed by stream capture. + * Therefore, if the sequence of operations captured via ::cuStreamBeginCapture + * depended on the allocation being replayed whenever the graph is launched, the + * captured graph would be invalid. 
+ * + * Therefore, stream capture places restrictions on API calls that can be made within + * or concurrently to a ::cuStreamBeginCapture-::cuStreamEndCapture sequence. This + * behavior can be controlled via this API and flags to ::cuStreamBeginCapture. + * + * A thread's mode is one of the following: + * - \p CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has + * an ongoing capture sequence that was not initiated with + * \p CU_STREAM_CAPTURE_MODE_RELAXED at \p cuStreamBeginCapture, or if any other thread + * has a concurrent capture sequence initiated with \p CU_STREAM_CAPTURE_MODE_GLOBAL, + * this thread is prohibited from potentially unsafe API calls. + * - \p CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture + * sequence not initiated with \p CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited + * from potentially unsafe API calls. Concurrent capture sequences in other threads + * are ignored. + * - \p CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially + * unsafe API calls. Note that the thread is still prohibited from API calls which + * necessarily conflict with stream capture, for example, attempting ::cuEventQuery + * on an event that was last recorded inside a capture sequence. + * + * \param mode - Pointer to mode value to swap with the current mode + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuStreamBeginCapture + */ +CUresult CUDAAPI cuThreadExchangeStreamCaptureMode(CUstreamCaptureMode *mode); + +/** + * \brief Ends capture on a stream, returning the captured graph + * + * End capture on \p hStream, returning the captured graph via \p phGraph. + * Capture must have been initiated on \p hStream via a call to ::cuStreamBeginCapture. + * If capture was invalidated, due to a violation of the rules of stream capture, then + * a NULL graph will be returned. + * + * If the \p mode argument to ::cuStreamBeginCapture was not + * ::CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as + * ::cuStreamBeginCapture. + * + * \param hStream - Stream to query + * \param phGraph - The captured graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD + * \notefnerr + * + * \sa + * ::cuStreamCreate, + * ::cuStreamBeginCapture, + * ::cuStreamIsCapturing + */ +CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); + +/** + * \brief Returns a stream's capture status + * + * Return the capture status of \p hStream via \p captureStatus. After a successful + * call, \p *captureStatus will contain one of the following: + * - ::CU_STREAM_CAPTURE_STATUS_NONE: The stream is not capturing. + * - ::CU_STREAM_CAPTURE_STATUS_ACTIVE: The stream is capturing. + * - ::CU_STREAM_CAPTURE_STATUS_INVALIDATED: The stream was capturing but an error + * has invalidated the capture sequence. The capture sequence must be terminated + * with ::cuStreamEndCapture on the stream where it was initiated in order to + * continue using \p hStream. + * + * Note that, if this is called on ::CU_STREAM_LEGACY (the "null stream") while + * a blocking stream in the same context is capturing, it will return + * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and \p *captureStatus is unspecified + * after the call. The blocking stream capture is not invalidated. 
+ * + * When a blocking stream is capturing, the legacy stream is in an + * unusable state until the blocking stream capture is terminated. The legacy + * stream is not supported for stream capture, but attempted use would have an + * implicit dependency on the capturing stream(s). + * + * \param hStream - Stream to query + * \param captureStatus - Returns the stream's capture status + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + * \notefnerr + * + * \sa + * ::cuStreamCreate, + * ::cuStreamBeginCapture, + * ::cuStreamEndCapture + */ +CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); + +/** + * \brief Query a stream's capture state + * + * Query stream state related to stream capture. + * + * If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created + * with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT. + * + * Valid data (other than capture status) is returned only if both of the following are true: + * - the call returns CUDA_SUCCESS + * - the returned capture status is ::CU_STREAM_CAPTURE_STATUS_ACTIVE + * + * \param hStream - The stream to query + * \param captureStatus_out - Location to return the capture status of the stream; required + * \param id_out - Optional location to return an id for the capture sequence, which is + * unique over the lifetime of the process + * \param graph_out - Optional location to return the graph being captured into. All + * operations other than destroy and node removal are permitted on the graph + * while the capture sequence is in progress. This API does not transfer + * ownership of the graph, which is transferred or destroyed at + * ::cuStreamEndCapture. Note that the graph handle may be invalidated before + * end of capture for certain errors. Nodes that are or become + * unreachable from the original stream at ::cuStreamEndCapture due to direct + * actions on the graph do not trigger ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED. + * \param dependencies_out - Optional location to store a pointer to an array of nodes. + * The next node to be captured in the stream will depend on this set of nodes, + * absent operations such as event wait which modify this set. The array pointer + * is valid until the next API call which operates on the stream or until end of + * capture. The node handles may be copied out and are valid until they or the + * graph is destroyed. The driver-owned array may also be passed directly to + * APIs that operate on the graph (not the stream) without copying. + * \param numDependencies_out - Optional location to store the size of the array + * returned in dependencies_out. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBeginCapture, + * ::cuStreamIsCapturing, + * ::cuStreamUpdateCaptureDependencies + */ +CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, + cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); + +/** + * \brief Update the set of dependencies in a capturing stream (11.3+) + * + * Modifies the dependency set of a capturing stream. The dependency set is the set + * of nodes that the next captured node in the stream will depend on. 
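+ *
+ * Editorial sketch (not part of the original header): querying the capture
+ * state and replacing the dependency set with a manually added node. Assumes
+ * the graph node API (here ::cuGraphAddEmptyNode) is available; error checking
+ * is omitted and \p hStream is a placeholder.
+ * \code
    CUstreamCaptureStatus status;
    cuuint64_t id;
    CUgraph hGraph;
    const CUgraphNode *deps;
    size_t numDeps;

    // While hStream is capturing, inspect the graph and the current
    // dependency set of the next node to be captured.
    cuStreamGetCaptureInfo(hStream, &status, &id, &hGraph, &deps, &numDeps);

    // Add a node to the graph by hand and make it the sole dependency
    // of the next captured node.
    CUgraphNode node;
    cuGraphAddEmptyNode(&node, hGraph, deps, numDeps);
    cuStreamUpdateCaptureDependencies(hStream, &node, 1,
                                      CU_STREAM_SET_CAPTURE_DEPENDENCIES);
+ * \endcode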
+ * + * Valid flags are ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES and + * ::CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to + * the API is added to the existing set or replaces it. A flags value of 0 defaults + * to ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES. + * + * Nodes that are removed from the dependency set via this API do not result in + * ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at + * ::cuStreamEndCapture. + * + * Returns ::CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing. + * + * This API is new in CUDA 11.3. Developers requiring compatibility across minor + * versions to CUDA 11.0 should not use this API or provide a fallback. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_ILLEGAL_STATE + * + * \sa + * ::cuStreamBeginCapture, + * ::cuStreamGetCaptureInfo, + */ +CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p hStream to specify stream association of + * \p length bytes of memory starting from \p dptr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p dptr must point to one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cuMemAllocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. + * + * For pageable host allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::CUmemAttach_flags. + * If the ::CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * If the ::CU_MEM_ATTACH_SINGLE flag is specified and \p hStream is associated with + * a device that has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, + * the program makes a guarantee that it will only access the memory on the device + * from \p hStream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p hStream + * have completed, regardless of whether other streams are active. 
In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. + * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cuStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p hStream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cuMemAllocManaged. For __managed__ variables, the default + * association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. + * + * \param hStream - Stream in which to enqueue the attach operation + * \param dptr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * pageable memory) + * \param length - Length of memory + * \param flags - Must be one of ::CUmemAttach_flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuMemAllocManaged, + * ::cudaStreamAttachMemAsync + */ +CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); + +/** + * \brief Determine status of a compute stream + * + * Returns ::CUDA_SUCCESS if all operations in the stream specified by + * \p hStream have completed, or ::CUDA_ERROR_NOT_READY if not. + * + * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS + * is equivalent to having called ::cuStreamSynchronize(). + * + * \param hStream - Stream to query status of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_READY + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamQuery + */ +CUresult CUDAAPI cuStreamQuery(CUstream hStream); + +/** + * \brief Wait until a stream's tasks are completed + * + * Waits until the device has completed all operations in the stream specified + * by \p hStream. If the context was created with the + * ::CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the + * stream is finished with all of its tasks. 
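+ *
+ * Editorial sketch (not part of the original header): polling a stream with
+ * ::cuStreamQuery versus blocking with ::cuStreamSynchronize. \p hStream is a
+ * placeholder and error checking is omitted.
+ * \code
    // Non-blocking poll: CUDA_ERROR_NOT_READY means work is still pending.
    while (cuStreamQuery(hStream) == CUDA_ERROR_NOT_READY) {
        // overlap useful host work here
    }

    // Or simply block the calling thread until the stream drains.
    cuStreamSynchronize(hStream);
+ * \endcode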
+ * + * \param hStream - Stream to wait for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE + + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamDestroy, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamAddCallback, + * ::cudaStreamSynchronize + */ +CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); + +/** + * \brief Destroys a stream + * + * Destroys the stream specified by \p hStream. + * + * In case the device is still doing work in the stream \p hStream + * when ::cuStreamDestroy() is called, the function will return immediately + * and the resources associated with \p hStream will be released automatically + * once the device has completed all work in \p hStream. + * + * \param hStream - Stream to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamDestroy + */ +CUresult CUDAAPI cuStreamDestroy(CUstream hStream); + +/** + * \brief Copies attributes from source stream to destination stream. + * + * Copies attributes from source stream \p src to destination stream \p dst. + * Both streams must have the same context. + * + * \param[out] dst Destination stream + * \param[in] src Source stream + * For list of attributes see ::CUstreamAttrID + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuStreamCopyAttributes(CUstream dst, CUstream src); + +/** + * \brief Queries stream attribute. + * + * Queries attribute \p attr from \p hStream and stores it in corresponding + * member of \p value_out. + * + * \param[in] hStream + * \param[in] attr + * \param[out] value_out + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, + CUstreamAttrValue *value_out); + +/** + * \brief Sets stream attribute. + * + * Sets attribute \p attr on \p hStream from corresponding attribute of + * \p value. The updated attribute will be applied to subsequent work + * submitted to the stream. It will not affect previously submitted work. + * + * \param[out] hStream + * \param[in] attr + * \param[in] value + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, + const CUstreamAttrValue *value); + +/** @} */ /* END CUDA_STREAM */ + + +/** + * \defgroup CUDA_EVENT Event Management + * + * ___MANBRIEF___ event management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the event management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Creates an event + * + * Creates an event *phEvent for the current context with the flags specified via + * \p Flags. Valid flags include: + * - ::CU_EVENT_DEFAULT: Default event creation flag. 
+ * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking + * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on + * an event created with this flag will block until the event has actually + * been recorded. + * - ::CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need + * to record timing data. Events created with this flag specified and + * the ::CU_EVENT_BLOCKING_SYNC flag not specified will provide the best + * performance when used with ::cuStreamWaitEvent() and ::cuEventQuery(). + * - ::CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an + * interprocess event by ::cuIpcGetEventHandle(). ::CU_EVENT_INTERPROCESS must + * be specified along with ::CU_EVENT_DISABLE_TIMING. + * + * \param phEvent - Returns newly created event + * \param Flags - Event creation flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventCreate, + * ::cudaEventCreateWithFlags + */ +CUresult CUDAAPI cuEventCreate(CUevent *phEvent, unsigned int Flags); + +/** + * \brief Records an event + * + * Captures in \p hEvent the contents of \p hStream at the time of this call. + * \p hEvent and \p hStream must be from the same context. + * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p hStream after this call do not modify \p hEvent. See note on default + * stream behavior for what is captured in the default case. + * + * ::cuEventRecord() can be called multiple times on the same event and + * will overwrite the previously captured state. Other APIs such as + * ::cuStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cuEventRecord(). Before the first call to ::cuEventRecord(), an + * event represents an empty set of work, so for example ::cuEventQuery() + * would return ::CUDA_SUCCESS. + * + * \param hEvent - Event to record + * \param hStream - Stream to record event for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \note_null_stream + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuStreamWaitEvent, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventRecord, + * ::cuEventRecordWithFlags + */ +CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); + +/** + * \brief Records an event + * + * Captures in \p hEvent the contents of \p hStream at the time of this call. + * \p hEvent and \p hStream must be from the same context. + * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p hStream after this call do not modify \p hEvent. See note on default + * stream behavior for what is captured in the default case. + * + * ::cuEventRecordWithFlags() can be called multiple times on the same event and + * will overwrite the previously captured state. 
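+ *
+ * Editorial sketch (not part of the original header): using a recorded event
+ * to order work across two streams. Error checking is omitted; \p streamA,
+ * \p streamB, \p dBuf, \p hBuf and \p hOut are placeholders.
+ * \code
    CUevent ev;
    cuEventCreate(&ev, CU_EVENT_DISABLE_TIMING);

    // Producer: record the event after the work it should represent.
    cuMemcpyHtoDAsync(dBuf, hBuf, nbytes, streamA);
    cuEventRecord(ev, streamA);

    // Consumer: wait on the most recently recorded state of ev.
    cuStreamWaitEvent(streamB, ev, 0);
    cuMemcpyDtoHAsync(hOut, dBuf, nbytes, streamB);

    cuEventDestroy(ev);
+ * \endcode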
Other APIs such as + * ::cuStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cuEventRecordWithFlags(). Before the first call to ::cuEventRecordWithFlags(), an + * event represents an empty set of work, so for example ::cuEventQuery() + * would return ::CUDA_SUCCESS. + * + * flags include: + * - ::CU_EVENT_RECORD_DEFAULT: Default event creation flag. + * - ::CU_EVENT_RECORD_EXTERNAL: Event is captured in the graph as an external + * event node when performing stream capture. This flag is invalid outside + * of stream capture. + * + * \param hEvent - Event to record + * \param hStream - Stream to record event for + * \param flags - See ::CUevent_capture_flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \note_null_stream + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuStreamWaitEvent, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cuEventRecord, + * ::cudaEventRecord + */ +CUresult CUDAAPI cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags); + +/** + * \brief Queries an event's status + * + * Queries the status of all work currently captured by \p hEvent. See + * ::cuEventRecord() for details on what is captured by an event. + * + * Returns ::CUDA_SUCCESS if all captured work has been completed, or + * ::CUDA_ERROR_NOT_READY if any captured work is incomplete. + * + * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS + * is equivalent to having called ::cuEventSynchronize(). + * + * \param hEvent - Event to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_READY + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventSynchronize, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventQuery + */ +CUresult CUDAAPI cuEventQuery(CUevent hEvent); + +/** + * \brief Waits for an event to complete + * + * Waits until the completion of all work currently captured in \p hEvent. + * See ::cuEventRecord() for details on what is captured by an event. + * + * Waiting for an event that was created with the ::CU_EVENT_BLOCKING_SYNC + * flag will cause the calling CPU thread to block until the event has + * been completed by the device. If the ::CU_EVENT_BLOCKING_SYNC flag has + * not been set, then the CPU thread will busy-wait until the event has + * been completed by the device. + * + * \param hEvent - Event to wait for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventSynchronize + */ +CUresult CUDAAPI cuEventSynchronize(CUevent hEvent); + +/** + * \brief Destroys an event + * + * Destroys the event specified by \p hEvent. + * + * An event may be destroyed before it is complete (i.e., while + * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY). In this case, the + * call does not block on completion of the event, and any associated + * resources will automatically be released asynchronously at completion. 
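+ *
+ * Editorial sketch (not part of the original header): a full event lifecycle
+ * used to time device work. Error checking is omitted; \p hStream, \p dBuf and
+ * \p hBuf are placeholders.
+ * \code
    CUevent start, stop;
    float ms = 0.0f;

    cuEventCreate(&start, CU_EVENT_DEFAULT);
    cuEventCreate(&stop,  CU_EVENT_DEFAULT);

    cuEventRecord(start, hStream);
    cuMemcpyHtoDAsync(dBuf, hBuf, nbytes, hStream);   // work being timed
    cuEventRecord(stop, hStream);

    cuEventSynchronize(stop);
    cuEventElapsedTime(&ms, start, stop);             // elapsed milliseconds

    cuEventDestroy(start);
    cuEventDestroy(stop);
+ * \endcode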
+ * + * \param hEvent - Event to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventElapsedTime, + * ::cudaEventDestroy + */ +CUresult CUDAAPI cuEventDestroy(CUevent hEvent); + +/** + * \brief Computes the elapsed time between two events + * + * Computes the elapsed time between two events (in milliseconds with a + * resolution of around 0.5 microseconds). + * + * If either event was last recorded in a non-NULL stream, the resulting time + * may be greater than expected (even if both used the same stream handle). This + * happens because the ::cuEventRecord() operation takes place asynchronously + * and there is no guarantee that the measured latency is actually just between + * the two events. Any number of other different stream operations could execute + * in between the two measured events, thus altering the timing in a significant + * way. + * + * If ::cuEventRecord() has not been called on either event then + * ::CUDA_ERROR_INVALID_HANDLE is returned. If ::cuEventRecord() has been called + * on both events but one or both of them has not yet been completed (that is, + * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY on at least one of the + * events), ::CUDA_ERROR_NOT_READY is returned. If either event was created with + * the ::CU_EVENT_DISABLE_TIMING flag, then this function will return + * ::CUDA_ERROR_INVALID_HANDLE. + * + * \param pMilliseconds - Time between \p hStart and \p hEnd in ms + * \param hStart - Starting event + * \param hEnd - Ending event + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_READY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventDestroy, + * ::cudaEventElapsedTime + */ +CUresult CUDAAPI cuEventElapsedTime(float *pMilliseconds, CUevent hStart, CUevent hEnd); + +/** @} */ /* END CUDA_EVENT */ + +/** + * \defgroup CUDA_EXTRES_INTEROP External Resource Interoperability + * + * ___MANBRIEF___ External resource interoperability functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the external resource interoperability functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + + /** + * \brief Imports an external memory object + * + * Imports an externally allocated memory object and returns + * a handle to that in \p extMem_out. + * + * The properties of the handle being imported must be described in + * \p memHandleDesc. The ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure + * is defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { + CUexternalMemoryHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void *nvSciBufObject; + } handle; + unsigned long long size; + unsigned int flags; + } CUDA_EXTERNAL_MEMORY_HANDLE_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type specifies the type + * of handle being imported. 
::CUexternalMemoryHandleType is + * defined as: + * + * \code + typedef enum CUexternalMemoryHandleType_enum { + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 + } CUexternalMemoryHandleType; + * \endcode + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid + * file descriptor referencing a memory object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one + * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a memory object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a memory object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + * be non-NULL and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * memory object are destroyed. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one + * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Heap object. This handle holds a reference to the underlying + * object. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Heap object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one + * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Resource object. 
This handle holds a reference to the + * underlying object. If + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Resource object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + * represent a valid shared NT handle that is returned by + * IDXGIResource1::CreateSharedHandle when referring to a + * ID3D11Resource object. If + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D11Resource object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + * represent a valid shared KMT handle that is returned by + * IDXGIResource::GetSharedHandle when referring to a + * ID3D11Resource object and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * must be NULL. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL + * and reference a valid NvSciBuf object. + * If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the + * application must use ::cuWaitExternalSemaphoresAsync or ::cuSignalExternalSemaphoresAsync + * as appropriate barriers to maintain coherence between CUDA and the other drivers. + * See ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC and ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + * for memory synchronization. + * + * + * The size of the memory object must be specified in + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::size. + * + * Specifying the flag ::CUDA_EXTERNAL_MEMORY_DEDICATED in + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::flags indicates that the + * resource is a dedicated resource. The definition of what a + * dedicated resource is outside the scope of this extension. + * This flag must be set if ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type + * is one of the following: + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + * + * \param extMem_out - Returned handle to an external memory object + * \param memHandleDesc - Memory import handle descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \note If the Vulkan memory imported into CUDA is mapped on the CPU then the + * application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges + * as well as appropriate Vulkan pipeline barriers to maintain coherence between + * CPU and GPU. For more information on these APIs, please refer to "Synchronization + * and Cache Control" chapter from Vulkan specification. 
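+ *
+ * Editorial sketch (not part of the original header): importing an opaque
+ * file-descriptor handle and mapping a buffer onto it. Error checking is
+ * omitted; \p fd and \p sizeInBytes are placeholders supplied by the
+ * exporting API.
+ * \code
    CUDA_EXTERNAL_MEMORY_HANDLE_DESC memDesc;
    memset(&memDesc, 0, sizeof(memDesc));
    memDesc.type      = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
    memDesc.handle.fd = fd;            // CUDA takes ownership of the fd
    memDesc.size      = sizeInBytes;

    CUexternalMemory extMem;
    cuImportExternalMemory(&extMem, &memDesc);

    CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufDesc;
    memset(&bufDesc, 0, sizeof(bufDesc));
    bufDesc.offset = 0;
    bufDesc.size   = sizeInBytes;

    CUdeviceptr dptr;
    cuExternalMemoryGetMappedBuffer(&dptr, extMem, &bufDesc);

    // ... use dptr ...

    cuMemFree(dptr);                   // release the mapping first
    cuDestroyExternalMemory(extMem);   // then the imported memory object
+ * \endcode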
+ * + * \sa ::cuDestroyExternalMemory, + * ::cuExternalMemoryGetMappedBuffer, + * ::cuExternalMemoryGetMappedMipmappedArray + */ +CUresult CUDAAPI cuImportExternalMemory(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc); + +/** + * \brief Maps a buffer onto an imported memory object + * + * Maps a buffer onto an imported memory object and returns a device + * pointer in \p devPtr. + * + * The properties of the buffer being mapped must be described in + * \p bufferDesc. The ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is + * defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { + unsigned long long offset; + unsigned long long size; + unsigned int flags; + } CUDA_EXTERNAL_MEMORY_BUFFER_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::offset is the offset in + * the memory object where the buffer's base address is. + * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::size is the size of the buffer. + * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::flags must be zero. + * + * The offset and size have to be suitably aligned to match the + * requirements of the external API. Mapping two buffers whose ranges + * overlap may or may not result in the same virtual address being + * returned for the overlapped portion. In such cases, the application + * must ensure that all accesses to that region from the GPU are + * volatile. Otherwise writes made via one address are not guaranteed + * to be visible via the other address, even if they're issued by the + * same thread. It is recommended that applications map the combined + * range instead of mapping separate buffers and then apply the + * appropriate offsets to the returned pointer to derive the + * individual buffers. + * + * The returned pointer \p devPtr must be freed using ::cuMemFree. + * + * \param devPtr - Returned device pointer to buffer + * \param extMem - Handle to external memory object + * \param bufferDesc - Buffer descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalMemory, + * ::cuDestroyExternalMemory, + * ::cuExternalMemoryGetMappedMipmappedArray + */ +CUresult CUDAAPI cuExternalMemoryGetMappedBuffer(CUdeviceptr *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc); + +/** + * \brief Maps a CUDA mipmapped array onto an external memory object + * + * Maps a CUDA mipmapped array onto an external object and returns a + * handle to it in \p mipmap. + * + * The properties of the CUDA mipmapped array being mapped must be + * described in \p mipmapDesc. The structure + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { + unsigned long long offset; + CUDA_ARRAY3D_DESCRIPTOR arrayDesc; + unsigned int numLevels; + } CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::offset is the + * offset in the memory object where the base level of the mipmap + * chain is. + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc describes + * the format, dimensions and type of the base level of the mipmap + * chain. For further details on these parameters, please refer to the + * documentation for ::cuMipmappedArrayCreate. 
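+ *
+ * Editorial sketch (not part of the original header): filling in the
+ * mipmapped-array descriptor for a previously imported object \p extMem.
+ * Error checking is omitted; \p width, \p height and the chosen format are
+ * placeholders.
+ * \code
    CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC mmDesc;
    memset(&mmDesc, 0, sizeof(mmDesc));
    mmDesc.offset                = 0;
    mmDesc.numLevels             = 1;   // must be 1 for NvSciBuf imports
    mmDesc.arrayDesc.Width       = width;
    mmDesc.arrayDesc.Height      = height;
    mmDesc.arrayDesc.Depth       = 0;
    mmDesc.arrayDesc.Format      = CU_AD_FORMAT_UNSIGNED_INT8;
    mmDesc.arrayDesc.NumChannels = 4;

    CUmipmappedArray mip;
    cuExternalMemoryGetMappedMipmappedArray(&mip, extMem, &mmDesc);

    // ... use the mipmapped array ...
    cuMipmappedArrayDestroy(mip);
+ * \endcode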
Note that if the mipmapped + * array is bound as a color target in the graphics API, then the flag + * ::CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags. + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels specifies + * the total number of levels in the mipmap chain. + * + * If \p extMem was imported from a handle of type ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels must be equal to 1. + * + * The returned CUDA mipmapped array must be freed using ::cuMipmappedArrayDestroy. + * + * \param mipmap - Returned CUDA mipmapped array + * \param extMem - Handle to external memory object + * \param mipmapDesc - CUDA array descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalMemory, + * ::cuDestroyExternalMemory, + * ::cuExternalMemoryGetMappedBuffer + */ +CUresult CUDAAPI cuExternalMemoryGetMappedMipmappedArray(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc); + +/** + * \brief Destroys an external memory object. + * + * Destroys the specified external memory object. Any existing buffers + * and CUDA mipmapped arrays mapped onto this object must no longer be + * used and must be explicitly freed using ::cuMemFree and + * ::cuMipmappedArrayDestroy respectively. + * + * \param extMem - External memory object to be destroyed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalMemory, + * ::cuExternalMemoryGetMappedBuffer, + * ::cuExternalMemoryGetMappedMipmappedArray + */ +CUresult CUDAAPI cuDestroyExternalMemory(CUexternalMemory extMem); + +/** + * \brief Imports an external semaphore + * + * Imports an externally allocated synchronization object and returns + * a handle to that in \p extSem_out. + * + * The properties of the handle being imported must be described in + * \p semHandleDesc. The ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is + * defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { + CUexternalSemaphoreHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void* NvSciSyncObj; + } handle; + unsigned int flags; + } CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type specifies the type of + * handle being imported. 
::CUexternalSemaphoreHandleType is defined + * as: + * + * \code + typedef enum CUexternalSemaphoreHandleType_enum { + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 + } CUexternalSemaphoreHandleType; + * \endcode + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid + * file descriptor referencing a synchronization object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one + * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + * NULL. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must + * be non-NULL and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * synchronization object are destroyed. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one + * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + * NULL. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Fence object. This handle holds a reference to the underlying + * object. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid ID3D12Fence object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * represents a valid shared NT handle that is returned by + * ID3D11Fence::CreateSharedHandle. 
If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid ID3D11Fence object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj + * represents a valid NvSciSyncObj. + * + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * represents a valid shared NT handle that + * is returned by IDXGIResource1::CreateSharedHandle when referring to + * a IDXGIKeyedMutex object. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid IDXGIKeyedMutex object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * represents a valid shared KMT handle that + * is returned by IDXGIResource::GetSharedHandle when referring to + * a IDXGIKeyedMutex object and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid + * file descriptor referencing a synchronization object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, then exactly one + * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + * NULL. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object. + * + * \param extSem_out - Returned handle to an external semaphore + * \param semHandleDesc - Semaphore import handle descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \sa ::cuDestroyExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuImportExternalSemaphore(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc); + +/** + * \brief Signals a set of external semaphore objects + * + * Enqueues a signal operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of signaling a semaphore depends on the type of + * the object. 
+ * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * then signaling the semaphore will set it to the signaled state. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + * then the semaphore will be set to the value specified in + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value. + * + * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + * this API sets ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence + * to a value that can be used by subsequent waiters of the same NvSciSync object + * to order operations with those currently submitted in \p stream. Such an update + * will overwrite previous contents of + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default, + * signaling such an external semaphore object causes appropriate memory synchronization + * operations to be performed over all external memory objects that are imported as + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses + * made by other importers of the same set of NvSciBuf memory object(s) are coherent. + * These operations can be skipped by specifying the flag + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. + * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return + * CUDA_ERROR_NOT_SUPPORTED. + * NvSciSyncFence associated with semaphore object of the type + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC can be deterministic. For this the + * NvSciSyncAttrList used to create the semaphore object must have value of + * NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences + * allow users to enqueue a wait over the semaphore object even before corresponding + * signal is enqueued. For such a semaphore object, CUDA guarantees that each signal + * operation will increment the fence value by '1'. Users are expected to track count + * of signals enqueued on the semaphore object and insert waits accordingly. When such + * a semaphore object is signaled from multiple streams, due to concurrent stream + * execution, it is possible that the order in which the semaphore gets signaled is + * indeterministic. This could lead to waiters of the semaphore getting unblocked + * incorrectly. Users are expected to handle such situations, either by not using the + * same semaphore object with deterministic fence support enabled in different streams + * or by adding explicit dependency amongst such streams so that the semaphore is + * signaled in order. 
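+ *
+ * Editorial sketch (not part of the original header): signaling and waiting
+ * on a fence/timeline-style imported semaphore \p extSem using the
+ * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value field. Error
+ * checking is omitted; \p hStream and \p targetValue are placeholders.
+ * \code
    CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS sigParams;
    memset(&sigParams, 0, sizeof(sigParams));
    sigParams.params.fence.value = targetValue;   // fence/timeline types only

    cuSignalExternalSemaphoresAsync(&extSem, &sigParams, 1, hStream);

    // A matching wait, possibly issued from another stream or process:
    CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS waitParams;
    memset(&waitParams, 0, sizeof(waitParams));
    waitParams.params.fence.value = targetValue;

    cuWaitExternalSemaphoresAsync(&extSem, &waitParams, 1, hStream);
+ * \endcode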
+ * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + * then the keyed mutex will be released with the key specified in + * ::CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key. + * + * \param extSemArray - Set of external semaphores to be signaled + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to signal + * \param stream - Stream to enqueue the signal operations in + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuImportExternalSemaphore, + * ::cuDestroyExternalSemaphore, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + +/** + * \brief Waits on a set of external semaphore objects + * + * Enqueues a wait operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of waiting on a semaphore depends on the type + * of the object. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * then waiting on the semaphore will wait until the semaphore reaches + * the signaled state. The semaphore will then be reset to the + * unsignaled state. Therefore for every signal operation, there can + * only be one wait operation. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + * then waiting on the semaphore will wait until the value of the + * semaphore is greater than or equal to + * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value. + * + * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + * then, waiting on the semaphore will wait until the + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the + * signaler of the NvSciSyncObj that was associated with this semaphore object. + * By default, waiting on such an external semaphore object causes appropriate + * memory synchronization operations to be performed over all external memory objects + * that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that + * any subsequent accesses made by other importers of the same set of NvSciBuf memory + * object(s) are coherent. These operations can be skipped by specifying the flag + * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. 
+ * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return + * CUDA_ERROR_NOT_SUPPORTED. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + * then the keyed mutex will be acquired when it is released with the key + * specified in ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key + * or until the timeout specified by + * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs + * has lapsed. The timeout interval can either be a finite value + * specified in milliseconds or an infinite value. In case an infinite + * value is specified the timeout never elapses. The windows INFINITE + * macro must be used to specify infinite timeout. + * + * \param extSemArray - External semaphores to be waited on + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to wait on + * \param stream - Stream to enqueue the wait operations in + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_TIMEOUT + * \notefnerr + * + * \sa ::cuImportExternalSemaphore, + * ::cuDestroyExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync + */ +CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + +/** + * \brief Destroys an external semaphore + * + * Destroys an external semaphore object and releases any references + * to the underlying resource. Any outstanding signals or waits must + * have completed before the semaphore is destroyed. + * + * \param extSem - External semaphore to be destroyed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuDestroyExternalSemaphore(CUexternalSemaphore extSem); + +/** @} */ /* END CUDA_EXTRES_INTEROP */ + +/** + * \defgroup CUDA_MEMOP Stream Memory Operations + * + * ___MANBRIEF___ Stream memory operations of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream memory operations of the low-level CUDA + * driver application programming interface. + * + * Support for the ::CU_STREAM_WAIT_VALUE_NOR flag can be queried with + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + * + * Support for the ::cuStreamWriteValue64() and ::cuStreamWaitValue64() + * functions, as well as for the ::CU_STREAM_MEM_OP_WAIT_VALUE_64 and + * ::CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + * + * Support for both ::CU_STREAM_WAIT_VALUE_FLUSH and + * ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform + * hardware features and can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. + * + * Note that all memory pointers passed as parameters to these operations + * are device pointers. 
Where necessary a device pointer should be + * obtained, for example with ::cuMemHostGetDevicePointer(). + * + * None of the operations accepts pointers to managed memory buffers + * (::cuMemAllocManaged). + * + * \note + * Warning: + * Improper use of these APIs may deadlock the application. Synchronization + * ordering established through these APIs is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by these APIs should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. + * + * @{ + */ + +/** + * \brief Wait on a memory location + * + * Enqueues a synchronization of the stream on the given memory location. Work + * ordered after the operation will block until the given condition on the + * memory is satisfied. By default, the condition is to wait for + * (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal. + * Other condition types can be specified via \p flags. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot + * be used with managed memory (::cuMemAllocManaged). + * + * Support for CU_STREAM_WAIT_VALUE_NOR can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. + * + * \param stream The stream to synchronize on the memory location. + * \param addr The memory location to wait on. + * \param value The value to compare with the memory location. + * \param flags See ::CUstreamWaitValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWaitValue64, + * ::cuStreamWriteValue32, + * ::cuStreamWriteValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + +/** + * \brief Wait on a memory location + * + * Enqueues a synchronization of the stream on the given memory location. Work + * ordered after the operation will block until the given condition on the + * memory is satisfied. By default, the condition is to wait for + * (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal. + * Other condition types can be specified via \p flags. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). + * + * Support for this can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. + * + * \param stream The stream to synchronize on the memory location. 
+ * \param addr The memory location to wait on. + * \param value The value to compare with the memory location. + * \param flags See ::CUstreamWaitValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWaitValue32, + * ::cuStreamWriteValue32, + * ::cuStreamWriteValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + +/** + * \brief Write a value to memory + * + * Write a value to memory. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot + * be used with managed memory (::cuMemAllocManaged). + * + * \param stream The stream to do the write in. + * \param addr The device address to write to. + * \param value The value to write. + * \param flags See ::CUstreamWriteValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWriteValue64, + * ::cuStreamWaitValue32, + * ::cuStreamWaitValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuEventRecord + */ +CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + +/** + * \brief Write a value to memory + * + * Write a value to memory. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). + * + * Support for this can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + * + * \param stream The stream to do the write in. + * \param addr The device address to write to. + * \param value The value to write. + * \param flags See ::CUstreamWriteValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWriteValue32, + * ::cuStreamWaitValue32, + * ::cuStreamWaitValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuEventRecord + */ +CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + +/** + * \brief Batch operations to synchronize the stream via memory operations + * + * This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32(). + * Batching operations may avoid some performance overhead in both the API call + * and the device execution versus adding them to the stream in separate API + * calls. The operations are enqueued in the order they appear in the array. + * + * See ::CUstreamBatchMemOpType for the full set of supported operations, and + * ::cuStreamWaitValue32(), ::cuStreamWaitValue64(), ::cuStreamWriteValue32(), + * and ::cuStreamWriteValue64() for details of specific operations. + * + * See related APIs for details on querying support for specific operations. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. 
For more + * information, see the Stream Memory Operations section in the programming + * guide(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html). + * + * \param stream The stream to enqueue the operations in. + * \param count The number of operations in the array. Must be less than 256. + * \param paramArray The types and parameters of the individual operations. + * \param flags Reserved for future expansion; must be 0. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWaitValue32, + * ::cuStreamWaitValue64, + * ::cuStreamWriteValue32, + * ::cuStreamWriteValue64, + * ::cuMemHostRegister + */ +CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + +/** @} */ /* END CUDA_MEMOP */ + +/** + * \defgroup CUDA_EXEC Execution Control + * + * ___MANBRIEF___ execution control functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the execution control functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns information about a function + * + * Returns in \p *pi the integer value of the attribute \p attrib on the kernel + * given by \p hfunc. The supported attributes are: + * - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads + * per block, beyond which a launch of the function would fail. This number + * depends on both the function and the device on which the function is + * currently loaded. + * - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of + * statically-allocated shared memory per block required by this function. + * This does not include dynamically-allocated shared memory requested by + * the user at runtime. + * - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated + * constant memory required by this function. + * - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory + * used by each thread of this function. + * - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread + * of this function. + * - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for + * which the function was compiled. This value is the major PTX version * 10 + * + the minor PTX version, so a PTX version 1.3 function would return the + * value 13. Note that this may return the undefined value of 0 for cubins + * compiled prior to CUDA 3.0. + * - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for + * which the function was compiled. This value is the major binary + * version * 10 + the minor binary version, so a binary version 1.3 function + * would return the value 13. Note that this will return a value of 10 for + * legacy cubins that do not have a properly-encoded binary architecture + * version. + * - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the function has + * been compiled with user specified option "-Xptxas --dlcm=ca" set . + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of + * dynamically-allocated shared memory. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 + * cache split ratio in percent of total shared memory. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the + * kernel must launch with a valid cluster size specified. 
+ * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether + * the function can be launched with non-portable cluster size. 1 is allowed, + * 0 is disallowed. A non-portable cluster size may only function on the + * specific SKUs the program is tested on. The launch might fail if the + * program is run on a different hardware platform. CUDA API provides + * cudaOccupancyMaxActiveClusters to assist with checking whether the desired + * size can be launched on the current device. A portable cluster size is + * guaranteed to be functional on all compute capabilities higher than the + * target compute capability. The portable cluster size for sm_90 is 8 blocks + * per cluster. This value may increase for future compute capabilities. The + * specific hardware unit may support higher cluster sizes that’s not + * guaranteed to be portable. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \param pi - Returned attribute value + * \param attrib - Attribute requested + * \param hfunc - Function to query attribute of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuLaunchKernel, + * ::cudaFuncGetAttributes, + * ::cudaFuncSetAttribute, + * ::cuKernelGetAttribute + */ +CUresult CUDAAPI cuFuncGetAttribute(int *pi, CUfunction_attribute attrib, CUfunction hfunc); + +/** + * \brief Sets information about a function + * + * This call sets the value of a specified attribute \p attrib on the kernel given + * by \p hfunc to an integer value specified by \p val + * This function returns CUDA_SUCCESS if the new value of the attribute could be + * successfully set. If the set fails, this call will return an error. + * Not all attributes can have values set. Attempting to set a value on a read-only + * attribute will result in an error (CUDA_ERROR_INVALID_VALUE) + * + * Supported attributes for the cuFuncSetAttribute call are: + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This maximum size in bytes of + * dynamically-allocated shared memory. The value should contain the requested + * maximum size of dynamically-allocated shared memory. The sum of this value and + * the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the + * device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. + * The maximal size of requestable dynamic shared memory may differ by GPU + * architecture. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 + * cache and shared memory use the same hardware resources, this sets the shared memory + * carveout preference, in percent of the total shared memory. + * See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. 
The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \param hfunc - Function to query attribute of + * \param attrib - Attribute requested + * \param value - The value to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuLaunchKernel, + * ::cudaFuncGetAttributes, + * ::cudaFuncSetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value); + +/** + * \brief Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p config the preferred cache configuration for + * the device function \p hfunc. This is only a preference. The driver will use + * the requested configuration if possible, but it is free to choose a different + * configuration if required to execute \p hfunc. Any context-wide preference + * set via ::cuCtxSetCacheConfig() will be overridden by this per-function + * setting unless the per-function setting is ::CU_FUNC_CACHE_PREFER_NONE. In + * that case, the current context-wide setting will be used. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
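+ *
+ * As an illustrative sketch (assuming \p hfunc was obtained via
+ * ::cuModuleGetFunction and the kernel is known to be shared-memory bound),
+ * the preference could be set once before the first launch:
+ * \code
+    // Ask for a larger shared-memory carveout; the driver may still override this hint.
+    cuFuncSetCacheConfig(hfunc, CU_FUNC_CACHE_PREFER_SHARED);
+ * \endcode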
+ * + * + * The supported cache configurations are: + * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + * + * \param hfunc - Kernel to configure cache for + * \param config - Requested cache configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchKernel, + * ::cudaFuncSetCacheConfig, + * ::cuKernelSetCacheConfig + */ +CUresult CUDAAPI cuFuncSetCacheConfig(CUfunction hfunc, CUfunc_cache config); + +/** + * \brief Sets the shared memory configuration for a device function. + * + * On devices with configurable shared memory banks, this function will + * force all subsequent launches of the specified device function to have + * the given shared memory bank size configuration. On any given launch of the + * function, the shared memory configuration of the device will be temporarily + * changed if needed to suit the function's preferred configuration. Changes in + * shared memory configuration between subsequent launches of functions, + * may introduce a device side synchronization point. + * + * Any per-function setting of shared memory bank size set via + * ::cuFuncSetSharedMemConfig will override the context wide setting set with + * ::cuCtxSetSharedMemConfig. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * The supported bank configurations are: + * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: use the context's shared memory + * configuration when launching this function. + * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to + * be natively four bytes when launching this function. + * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to + * be natively eight bytes when launching this function. + * + * \param hfunc - kernel to be given a shared memory config + * \param config - requested shared memory configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuCtxGetSharedMemConfig, + * ::cuCtxSetSharedMemConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchKernel, + * ::cudaFuncSetSharedMemConfig + */ +CUresult CUDAAPI cuFuncSetSharedMemConfig(CUfunction hfunc, CUsharedconfig config); + +/** + * \brief Returns a module handle + * + * Returns in \p *hmod the handle of the module that function \p hfunc + * is located in. The lifetime of the module corresponds to the lifetime of + * the context it was loaded in or until the module is explicitly unloaded. + * + * The CUDA runtime manages its own modules loaded into the primary context. 
+ * If the handle returned by this API refers to a module loaded by the CUDA runtime, + * calling ::cuModuleUnload() on that module will result in undefined behavior. + * + * \param hmod - Returned module handle + * \param hfunc - Function to retrieve module for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + */ +CUresult CUDAAPI cuFuncGetModule(CUmodule *hmod, CUfunction hfunc); + +/** + * \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel + * + * Invokes the function ::CUfunction or the kernel ::CUkernel \p f + * on a \p gridDimX x \p gridDimY x \p gridDimZ grid of blocks. + * Each block contains \p blockDimX x \p blockDimY x + * \p blockDimZ threads. + * + * \p sharedMemBytes sets the amount of dynamic shared memory that will be + * available to each thread block. + * + * Kernel parameters to \p f can be specified in one of two ways: + * + * 1) Kernel parameters can be specified via \p kernelParams. If \p f + * has N parameters, then \p kernelParams needs to be an array of N + * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] + * must point to a region of memory from which the actual kernel + * parameter will be copied. The number of kernel parameters and their + * offsets and sizes do not need to be specified as that information is + * retrieved directly from the kernel's image. + * + * 2) Kernel parameters can also be packaged by the application into + * a single buffer that is passed in via the \p extra parameter. + * This places the burden on the application of knowing each kernel + * parameter's size and alignment/padding within the buffer. Here is + * an example of using the \p extra parameter in this manner: + * \code + size_t argBufferSize; + char argBuffer[256]; + + // populate argBuffer and argBufferSize + + void *config[] = { + CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer, + CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize, + CU_LAUNCH_PARAM_END + }; + status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config); + * \endcode + * + * The \p extra parameter exists to allow ::cuLaunchKernel to take + * additional less commonly used arguments. \p extra specifies a list of + * names of extra settings and their corresponding values. Each extra + * setting name is immediately followed by the corresponding value. The + * list must be terminated with either NULL or ::CU_LAUNCH_PARAM_END. + * + * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + * array; + * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + * value in \p extra will be a pointer to a buffer containing all + * the kernel parameters for launching kernel \p f; + * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + * value in \p extra will be a pointer to a size_t containing the + * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER; + * + * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel + * parameters are specified with both \p kernelParams and \p extra + * (i.e. both \p kernelParams and \p extra are non-NULL). + * + * Calling ::cuLaunchKernel() invalidates the persistent function state + * set through the following deprecated APIs: + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), + * ::cuParamSetv(). 
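+ *
+ * As a complementary sketch for option 1) above (illustrative only; the kernel
+ * signature, \p dOut, \p dIn and \p n are assumptions, not part of this API),
+ * parameters can be passed through \p kernelParams without any manual packing:
+ * \code
+    // Hypothetical kernel: __global__ void scale(float *out, const float *in, int n)
+    CUdeviceptr dOut, dIn;          // assume these were allocated with cuMemAlloc()
+    int n = 1 << 20;
+    void *params[] = { &dOut, &dIn, &n };
+
+    unsigned int threads = 256;
+    unsigned int blocks  = (n + threads - 1) / threads;
+    status = cuLaunchKernel(f, blocks, 1, 1, threads, 1, 1, 0, s, params, NULL);
+ * \endcode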
+ * + * Note that to use ::cuLaunchKernel(), the kernel \p f must either have + * been compiled with toolchain version 3.2 or later so that it will + * contain kernel parameter information, or have no kernel parameters. + * If either of these conditions is not met, then ::cuLaunchKernel() will + * return ::CUDA_ERROR_INVALID_IMAGE. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream \p hStream + * or the current context in case of NULL stream. + * + * \param f - Function ::CUfunction or Kernel ::CUkernel to launch + * \param gridDimX - Width of grid in blocks + * \param gridDimY - Height of grid in blocks + * \param gridDimZ - Depth of grid in blocks + * \param blockDimX - X dimension of each thread block + * \param blockDimY - Y dimension of each thread block + * \param blockDimZ - Z dimension of each thread block + * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes + * \param hStream - Stream identifier + * \param kernelParams - Array of pointers to kernel parameters + * \param extra - Extra options + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_NOT_FOUND + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cudaLaunchKernel, + * ::cuLibraryGetKernel, + * ::cuKernelSetCacheConfig, + * ::cuKernelGetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuLaunchKernel(CUfunction f, + unsigned int gridDimX, + unsigned int gridDimY, + unsigned int gridDimZ, + unsigned int blockDimX, + unsigned int blockDimY, + unsigned int blockDimZ, + unsigned int sharedMemBytes, + CUstream hStream, + void **kernelParams, + void **extra); + +/** + * \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel with launch-time configuration + * + * Invokes the function ::CUfunction or the kernel ::CUkernel \p f with the specified launch-time configuration + * \p config. + * + * The ::CUlaunchConfig structure is defined as: + * \code + typedef struct CUlaunchConfig_st { + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + CUlaunchAttribute *attrs; + unsigned int numAttrs; + } CUlaunchConfig; + * \endcode + * where: + * - ::CUlaunchConfig::gridDimX is the width of the grid in blocks. + * - ::CUlaunchConfig::gridDimY is the height of the grid in blocks. + * - ::CUlaunchConfig::gridDimZ is the depth of the grid in blocks. + * - ::CUlaunchConfig::blockDimX is the X dimension of each thread block. + * - ::CUlaunchConfig::blockDimX is the Y dimension of each thread block. + * - ::CUlaunchConfig::blockDimZ is the Z dimension of each thread block. + * - ::CUlaunchConfig::sharedMemBytes is the dynamic shared-memory size per + * thread block in bytes. 
+ * - ::CUlaunchConfig::hStream is the handle to the stream to perform the launch + * in. The CUDA context associated with this stream must match that associated + * with function f. + * - ::CUlaunchConfig::attrs is an array of ::CUlaunchConfig::numAttrs + * continguous ::CUlaunchAttribute elements. The value of this pointer is not + * considered if ::CUlaunchConfig::numAttrs is zero. However, in that case, it + * is recommended to set the pointer to NULL. + * - ::CUlaunchConfig::numAttrs is the numbers of attributes populating the + * first ::CUlaunchConfig::numAttrs positions of the ::CUlaunchConfig::attrs + * array. + * + * Launch-time configuration is specified by adding entries to + * ::CUlaunchConfig::attrs. Each entry is an attribute ID and a corresponding + * attribute value. + * + * The ::CUlaunchAttribute structure is defined as: + * \code + typedef struct CUlaunchAttribute_st { + CUlaunchAttributeID id; + CUlaunchAttributeValue value; + } CUlaunchAttribute; + * \endcode + * where: + * - ::CUlaunchAttribute::id is a unique enum identifying the attribute. + * - ::CUlaunchAttribute::value is a union that hold the attribute value. + * + * An example of using the \p config parameter: + * \code + CUlaunchAttribute coopAttr = {.id = CU_LAUNCH_ATTRIBUTE_COOPERATIVE, + .value = 1}; + CUlaunchConfig config = {... // set block and grid dimensions + .attrs = &coopAttr, + .numAttrs = 1}; + + cuLaunchKernelEx(&config, kernel, NULL, NULL); + * \endcode + * + * The ::CUlaunchAttributeID enum is defined as: + * \code + typedef enum CUlaunchAttributeID_enum { + CU_LAUNCH_ATTRIBUTE_IGNORE = 0, + CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, + CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2, + CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3, + CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4, + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5, + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6, + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7, + } CUlaunchAttributeID; + * \endcode + * + * and the corresponding ::CUlaunchAttributeValue union as : + * \code + typedef union CUlaunchAttributeValue_union { + cuuint64_t pad[8]; + CUaccessPolicyWindow accessPolicyWindow; + int cooperative; + CUsynchronizationPolicy syncPolicy; + struct { + unsigned int x; + unsigned int y; + unsigned int z; + } clusterDim; + CUclusterSchedulingPolicy clusterSchedulingPolicyPreference; + int programmaticStreamSerializationAllowed; + struct { + CUevent event; + int flags; + int triggerAtBlockStart; + } programmaticEvent; + } CUlaunchAttributeValue; + * \endcode + * + * Setting ::CU_LAUNCH_ATTRIBUTE_COOPERATIVE to a non-zero value causes the + * kernel launch to be a cooperative launch, with exactly the same usage and + * semantics of ::cuLaunchCooperativeKernel. + * + * Setting ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION to a non-zero + * values causes the kernel to use programmatic means to resolve its stream + * dependency -- enabling the CUDA runtime to opportunistically allow the grid's + * execution to overlap with the previous kernel in the stream, if that kernel + * requests the overlap. + * + * ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT records an event along with the + * kernel launch. Event recorded through this launch attribute is guaranteed to + * only trigger after all block in the associated kernel trigger the event. A + * block can trigger the event through PTX launchdep.release or CUDA builtin + * function cudaTriggerProgrammaticLaunchCompletion(). 
A trigger can also be + * inserted at the beginning of each block's execution if triggerAtBlockStart is + * set to non-0. Note that dependents (including the CPU thread calling + * cuEventSynchronize()) are not guaranteed to observe the release precisely + * when it is released. For example, cuEventSynchronize() may only observe the + * event trigger long after the associated kernel has completed. This recording + * type is primarily meant for establishing programmatic dependency between + * device tasks. The event supplied must not be an interprocess or interop + * event. The event must disable timing (i.e. created with + * ::CU_EVENT_DISABLE_TIMING flag set). + * + * The effect of other attributes is consistent with their effect when set via + * persistent APIs. + * + * See ::cuStreamSetAttribute for + * - ::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW + * - ::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY + * + * See ::cuFunctionSetAttribute for + * - ::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION + * - ::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + * + * Kernel parameters to \p f can be specified in the same ways that they can be + * using ::cuLaunchKernel. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream ::CUlaunchConfig::hStream + * or the current context in case of NULL stream. + * + * \param config - Config to launch + * \param f - Function ::CUfunction or Kernel ::CUkernel to launch + * \param kernelParams - Array of pointers to kernel parameters + * \param extra - Extra options + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_NOT_FOUND + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cudaLaunchKernel, + * ::cudaLaunchKernelEx, + * ::cuLibraryGetKernel, + * ::cuKernelSetCacheConfig, + * ::cuKernelGetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuLaunchKernelEx(const CUlaunchConfig *config, + CUfunction f, + void **kernelParams, + void **extra); + +/** + * \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel where thread blocks + * can cooperate and synchronize as they execute + * + * Invokes the function ::CUfunction or the kernel ::CUkernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ + * grid of blocks. Each block contains \p blockDimX x \p blockDimY x + * \p blockDimZ threads. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream \p hStream + * or the current context in case of NULL stream. + * + * \p sharedMemBytes sets the amount of dynamic shared memory that will be + * available to each thread block. 
+ *
+ * The device on which this kernel is invoked must have a non-zero value for
+ * the device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH.
+ *
+ * The total number of blocks launched cannot exceed the maximum number of blocks per
+ * multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or
+ * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
+ * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.
+ *
+ * The kernel cannot make use of CUDA dynamic parallelism.
+ *
+ * Kernel parameters must be specified via \p kernelParams. If \p f
+ * has N parameters, then \p kernelParams needs to be an array of N
+ * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1]
+ * must point to a region of memory from which the actual kernel
+ * parameter will be copied. The number of kernel parameters and their
+ * offsets and sizes do not need to be specified as that information is
+ * retrieved directly from the kernel's image.
+ *
+ * Calling ::cuLaunchCooperativeKernel() sets persistent function state that is
+ * the same as function state set through the ::cuLaunchKernel API.
+ *
+ * When the kernel \p f is launched via ::cuLaunchCooperativeKernel(), the previous
+ * block shape, shared size and parameter info associated with \p f
+ * is overwritten.
+ *
+ * Note that to use ::cuLaunchCooperativeKernel(), the kernel \p f must either have
+ * been compiled with toolchain version 3.2 or later so that it will
+ * contain kernel parameter information, or have no kernel parameters.
+ * If either of these conditions is not met, then ::cuLaunchCooperativeKernel() will
+ * return ::CUDA_ERROR_INVALID_IMAGE.
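+ *
+ * As an illustrative sketch (assuming \p dev, \p f, \p blockSize,
+ * \p sharedMemBytes, \p s and \p kernelParams are set up by the application),
+ * a grid that satisfies the co-residency requirement could be sized as:
+ * \code
+    int supportsCoop = 0, numSms = 0, blocksPerSm = 0;
+    cuDeviceGetAttribute(&supportsCoop, CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH, dev);
+    cuDeviceGetAttribute(&numSms, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
+    cuOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSm, f, blockSize, sharedMemBytes);
+
+    if (supportsCoop && blocksPerSm > 0) {
+        // All blocks must be resident simultaneously: at most blocksPerSm * numSms blocks.
+        status = cuLaunchCooperativeKernel(f, blocksPerSm * numSms, 1, 1,
+                                           blockSize, 1, 1, sharedMemBytes, s, kernelParams);
+    }
+ * \endcode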
+ * + * \param f - Function ::CUfunction or Kernel ::CUkernel to launch + * \param gridDimX - Width of grid in blocks + * \param gridDimY - Height of grid in blocks + * \param gridDimZ - Depth of grid in blocks + * \param blockDimX - X dimension of each thread block + * \param blockDimY - Y dimension of each thread block + * \param blockDimZ - Z dimension of each thread block + * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes + * \param hStream - Stream identifier + * \param kernelParams - Array of pointers to kernel parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_NOT_FOUND + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchCooperativeKernelMultiDevice, + * ::cudaLaunchCooperativeKernel, + * ::cuLibraryGetKernel, + * ::cuKernelSetCacheConfig, + * ::cuKernelGetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, + unsigned int gridDimX, + unsigned int gridDimY, + unsigned int gridDimZ, + unsigned int blockDimX, + unsigned int blockDimY, + unsigned int blockDimZ, + unsigned int sharedMemBytes, + CUstream hStream, + void **kernelParams); + +/** + * \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute + * + * \deprecated This function is deprecated as of CUDA 11.3. + * + * Invokes kernels as specified in the \p launchParamsList array where each element + * of the array specifies all the parameters required to perform a single kernel launch. + * These kernels can cooperate and synchronize as they execute. The size of the array is + * specified by \p numDevices. + * + * No two kernels can be launched on the same device. All the devices targeted by this + * multi-device launch must be identical. All devices must have a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH. + * + * All kernels launched must be identical with respect to the compiled code. Note that + * any __device__, __constant__ or __managed__ variables present in the module that owns + * the kernel launched on each device, are independently instantiated on every device. + * It is the application's responsibility to ensure these variables are initialized and + * used appropriately. + * + * The size of the grids as specified in blocks, the size of the blocks themselves + * and the amount of shared memory used by each thread block must also match across + * all launched kernels. + * + * The streams used to launch these kernels must have been created via either ::cuStreamCreate + * or ::cuStreamCreateWithPriority. The NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD + * cannot be used. 
+ * + * The total number of blocks launched per kernel cannot exceed the maximum number of blocks + * per multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or + * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the + * total number of blocks launched per device has to match across all devices, the maximum + * number of blocks that can be launched per device will be limited by the device with the + * least number of multiprocessors. + * + * The kernels cannot make use of CUDA dynamic parallelism. + * + * The ::CUDA_LAUNCH_PARAMS structure is defined as: + * \code + typedef struct CUDA_LAUNCH_PARAMS_st + { + CUfunction function; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; + } CUDA_LAUNCH_PARAMS; + * \endcode + * where: + * - ::CUDA_LAUNCH_PARAMS::function specifies the kernel to be launched. All functions must + * be identical with respect to the compiled code. + * Note that you can also specify context-less kernel ::CUkernel by querying the handle + * using ::cuLibraryGetKernel() and then casting to ::CUfunction. In this case, the context to + * launch the kernel on be taken from the specified stream ::CUDA_LAUNCH_PARAMS::hStream. + * - ::CUDA_LAUNCH_PARAMS::gridDimX is the width of the grid in blocks. This must match across + * all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::gridDimY is the height of the grid in blocks. This must match across + * all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::gridDimZ is the depth of the grid in blocks. This must match across + * all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::blockDimX is the X dimension of each thread block. This must match across + * all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::blockDimX is the Y dimension of each thread block. This must match across + * all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::blockDimZ is the Z dimension of each thread block. This must match across + * all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::sharedMemBytes is the dynamic shared-memory size per thread block in bytes. + * This must match across all kernels launched. + * - ::CUDA_LAUNCH_PARAMS::hStream is the handle to the stream to perform the launch in. This cannot + * be the NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD. The CUDA context associated + * with this stream must match that associated with ::CUDA_LAUNCH_PARAMS::function. + * - ::CUDA_LAUNCH_PARAMS::kernelParams is an array of pointers to kernel parameters. If + * ::CUDA_LAUNCH_PARAMS::function has N parameters, then ::CUDA_LAUNCH_PARAMS::kernelParams + * needs to be an array of N pointers. Each of ::CUDA_LAUNCH_PARAMS::kernelParams[0] through + * ::CUDA_LAUNCH_PARAMS::kernelParams[N-1] must point to a region of memory from which the actual + * kernel parameter will be copied. The number of kernel parameters and their offsets and sizes + * do not need to be specified as that information is retrieved directly from the kernel's image. + * + * By default, the kernel won't begin execution on any GPU until all prior work in all the specified + * streams has completed. This behavior can be overridden by specifying the flag + * ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. 
When this flag is specified, each kernel + * will only wait for prior work in the stream corresponding to that GPU to complete before it begins + * execution. + * + * Similarly, by default, any subsequent work pushed in any of the specified streams will not begin + * execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying + * the flag ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified, + * any subsequent work pushed in any of the specified streams will only wait for the kernel launched + * on the GPU corresponding to that stream to complete before it begins execution. + * + * Calling ::cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is + * the same as function state set through ::cuLaunchKernel API when called individually for each + * element in \p launchParamsList. + * + * When kernels are launched via ::cuLaunchCooperativeKernelMultiDevice(), the previous + * block shape, shared size and parameter info associated with each ::CUDA_LAUNCH_PARAMS::function + * in \p launchParamsList is overwritten. + * + * Note that to use ::cuLaunchCooperativeKernelMultiDevice(), the kernels must either have + * been compiled with toolchain version 3.2 or later so that it will + * contain kernel parameter information, or have no kernel parameters. + * If either of these conditions is not met, then ::cuLaunchCooperativeKernelMultiDevice() will + * return ::CUDA_ERROR_INVALID_IMAGE. + * + * \param launchParamsList - List of launch parameters, one per device + * \param numDevices - Size of the \p launchParamsList array + * \param flags - Flags to control launch behavior + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchCooperativeKernel, + * ::cudaLaunchCooperativeKernelMultiDevice + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchCooperativeKernelMultiDevice(CUDA_LAUNCH_PARAMS *launchParamsList, unsigned int numDevices, unsigned int flags); + +/** + * \brief Enqueues a host function call in a stream + * + * Enqueues a host function to run in a stream. The function will be called + * after currently enqueued work and will block work added after it. + * + * The host function must not make any CUDA API calls. Attempting to use a + * CUDA API may result in ::CUDA_ERROR_NOT_PERMITTED, but this is not required. + * The host function must not perform any synchronization that may depend on + * outstanding CUDA work not mandated to run earlier. Host functions without a + * mandated order (such as in independent streams) execute in undefined order + * and may be serialized. + * + * For the purposes of Unified Memory, execution makes a number of guarantees: + *
+ * - The stream is considered idle for the duration of the function's
+ *   execution. Thus, for example, the function may always use memory attached
+ *   to the stream it was enqueued in.
+ * - The start of execution of the function has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the function. It thus synchronizes streams which have been "joined"
+ *   prior to the function.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding host functions and stream callbacks
+ *   have executed. Thus, for example, a function might use global attached
+ *   memory even if work has been added to another stream, if the work has
+ *   been ordered behind the function call with an event.
+ * - Completion of the function does not cause a stream to become
+ *   active except as described above. The stream will remain idle
+ *   if no device work follows the function, and will remain idle across
+ *   consecutive host functions or stream callbacks without device work in
+ *   between. Thus, for example, stream synchronization can be done by
+ *   signaling from a host function at the end of the stream.
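+ *
+ * As an illustrative sketch (the callback and \p hStream below are
+ * application-defined, not part of this API), a host function can be used to
+ * observe that all prior work in a stream has finished:
+ * \code
+    // Hypothetical callback; the flag it sets must outlive the enqueued work.
+    void CUDA_CB markDone(void *userData) {
+        // Must not call into the CUDA API from here.
+        *(int *)userData = 1;
+    }
+
+    // ... later, after enqueuing asynchronous work in hStream:
+    int done = 0;                       // assumed to remain valid until the callback runs
+    status = cuLaunchHostFunc(hStream, markDone, &done);
+ * \endcode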
+ * + * Note that, in contrast to ::cuStreamAddCallback, the function will not be + * called in the event of an error in the CUDA context. + * + * \param hStream - Stream to enqueue function call in + * \param fn - The function to call once preceding stream operations are complete + * \param userData - User-specified data to be passed to the function + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuMemAllocManaged, + * ::cuStreamAttachMemAsync, + * ::cuStreamAddCallback + */ +CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData); + +/** @} */ /* END CUDA_EXEC */ + +/** + * \defgroup CUDA_EXEC_DEPRECATED Execution Control [DEPRECATED] + * + * ___MANBRIEF___ deprecated execution control functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated execution control functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Sets the block-dimensions for the function + * + * \deprecated + * + * Specifies the \p x, \p y, and \p z dimensions of the thread blocks that are + * created when the kernel given by \p hfunc is launched. + * + * \param hfunc - Kernel to specify dimensions of + * \param x - X dimension + * \param y - Y dimension + * \param z - Z dimension + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetSharedSize, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSeti, + * ::cuParamSetf, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetBlockShape(CUfunction hfunc, int x, int y, int z); + +/** + * \brief Sets the dynamic shared-memory size for the function + * + * \deprecated + * + * Sets through \p bytes the amount of dynamic shared memory that will be + * available to each thread block when the kernel given by \p hfunc is launched. + * + * \param hfunc - Kernel to specify dynamic shared-memory size for + * \param bytes - Dynamic shared-memory size per thread in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSeti, + * ::cuParamSetf, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetSharedSize(CUfunction hfunc, unsigned int bytes); + +/** + * \brief Sets the parameter size for the function + * + * \deprecated + * + * Sets through \p numbytes the total size in bytes needed by the function + * parameters of the kernel corresponding to \p hfunc. 
+ * + * \param hfunc - Kernel to set parameter size for + * \param numbytes - Size of parameter list in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetSize(CUfunction hfunc, unsigned int numbytes); + +/** + * \brief Adds an integer parameter to the function's argument list + * + * \deprecated + * + * Sets an integer parameter that will be specified the next time the + * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. + * + * \param hfunc - Kernel to add parameter to + * \param offset - Offset to add parameter to argument list + * \param value - Value of parameter + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSeti(CUfunction hfunc, int offset, unsigned int value); + +/** + * \brief Adds a floating-point parameter to the function's argument list + * + * \deprecated + * + * Sets a floating-point parameter that will be specified the next time the + * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. + * + * \param hfunc - Kernel to add parameter to + * \param offset - Offset to add parameter to argument list + * \param value - Value of parameter + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetf(CUfunction hfunc, int offset, float value); + +/** + * \brief Adds arbitrary data to the function's argument list + * + * \deprecated + * + * Copies an arbitrary amount of data (specified in \p numbytes) from \p ptr + * into the parameter space of the kernel corresponding to \p hfunc. \p offset + * is a byte offset. 
+ * + * \param hfunc - Kernel to add data to + * \param offset - Offset to add data to argument list + * \param ptr - Pointer to arbitrary data + * \param numbytes - Size of data to copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetv(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes); + +/** + * \brief Launches a CUDA function + * + * \deprecated + * + * Invokes the kernel \p f on a 1 x 1 x 1 grid of blocks. The block + * contains the number of threads specified by a previous call to + * ::cuFuncSetBlockShape(). + * + * The block shape, dynamic shared memory size, and parameter information + * must be set using + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), and + * ::cuParamSetv() + * prior to calling this function. + * + * Launching a function via ::cuLaunchKernel() invalidates the function's + * block shape, dynamic shared memory size, and parameter information. After + * launching via cuLaunchKernel, this state must be re-initialized prior to + * calling this function. Failure to do so results in undefined behavior. + * + * \param f - Kernel to launch + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunch(CUfunction f); + +/** + * \brief Launches a CUDA function + * + * \deprecated + * + * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of + * blocks. Each block contains the number of threads specified by a previous + * call to ::cuFuncSetBlockShape(). + * + * The block shape, dynamic shared memory size, and parameter information + * must be set using + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), and + * ::cuParamSetv() + * prior to calling this function. + * + * Launching a function via ::cuLaunchKernel() invalidates the function's + * block shape, dynamic shared memory size, and parameter information. After + * launching via cuLaunchKernel, this state must be re-initialized prior to + * calling this function. Failure to do so results in undefined behavior. 
+ * + * \param f - Kernel to launch + * \param grid_width - Width of grid in blocks + * \param grid_height - Height of grid in blocks + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGrid(CUfunction f, int grid_width, int grid_height); + +/** + * \brief Launches a CUDA function + * + * \deprecated + * + * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of + * blocks. Each block contains the number of threads specified by a previous + * call to ::cuFuncSetBlockShape(). + * + * The block shape, dynamic shared memory size, and parameter information + * must be set using + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), and + * ::cuParamSetv() + * prior to calling this function. + * + * Launching a function via ::cuLaunchKernel() invalidates the function's + * block shape, dynamic shared memory size, and parameter information. After + * launching via cuLaunchKernel, this state must be re-initialized prior to + * calling this function. Failure to do so results in undefined behavior. + * + * \param f - Kernel to launch + * \param grid_width - Width of grid in blocks + * \param grid_height - Height of grid in blocks + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * + * \note In certain cases where cubins are created with no ABI (i.e., using \p ptxas \p --abi-compile \p no), + * this function may serialize kernel launches. The CUDA driver retains asynchronous behavior by + * growing the per-thread stack as needed per launch and not shrinking it afterwards. + * + * \note_null_stream + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGridAsync(CUfunction f, int grid_width, int grid_height, CUstream hStream); + + +/** + * \brief Adds a texture-reference to the function's argument list + * + * \deprecated + * + * Makes the CUDA array or linear memory bound to the texture reference + * \p hTexRef available to a device program as a texture. In this version of + * CUDA, the texture-reference must be obtained via ::cuModuleGetTexRef() and + * the \p texunit parameter must be set to ::CU_PARAM_TR_DEFAULT. 
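+ *
+ * A minimal sketch (error checking omitted), assuming a toolkit in which the
+ * legacy texture-reference module API is still available, where \p hModule is a
+ * loaded ::CUmodule containing a texture reference named "texData" (hypothetical
+ * name), \p hArray is a ::CUarray and \p hKernel is the kernel being set up:
+ * \code
+ * CUtexref texRef;
+ * cuModuleGetTexRef(&texRef, hModule, "texData");
+ * cuTexRefSetArray(texRef, hArray, CU_TRSA_OVERRIDE_FORMAT);
+ * cuParamSetTexRef(hKernel, CU_PARAM_TR_DEFAULT, texRef);  // texunit must be CU_PARAM_TR_DEFAULT
+ * \endcode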
+ * + * \param hfunc - Kernel to add texture-reference to + * \param texunit - Texture unit (must be ::CU_PARAM_TR_DEFAULT) + * \param hTexRef - Texture-reference to add to argument list + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetTexRef(CUfunction hfunc, int texunit, CUtexref hTexRef); +/** @} */ /* END CUDA_EXEC_DEPRECATED */ + +/** + * \defgroup CUDA_GRAPH Graph Management + * + * ___MANBRIEF___ graph management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graph management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Creates a graph + * + * Creates an empty graph, which is returned via \p phGraph. + * + * \param phGraph - Returns newly created graph + * \param flags - Graph creation flags, must be 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode, + * ::cuGraphInstantiate, + * ::cuGraphDestroy, + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphClone + */ +CUresult CUDAAPI cuGraphCreate(CUgraph *phGraph, unsigned int flags); + +/** + * \brief Creates a kernel execution node and adds it to a graph + * + * Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The CUDA_KERNEL_NODE_PARAMS structure is defined as: + * + * \code + * typedef struct CUDA_KERNEL_NODE_PARAMS_st { + * CUfunction func; + * unsigned int gridDimX; + * unsigned int gridDimY; + * unsigned int gridDimZ; + * unsigned int blockDimX; + * unsigned int blockDimY; + * unsigned int blockDimZ; + * unsigned int sharedMemBytes; + * void **kernelParams; + * void **extra; + * } CUDA_KERNEL_NODE_PARAMS; + * \endcode + * + * When the graph is launched, the node will invoke kernel \p func on a (\p gridDimX x + * \p gridDimY x \p gridDimZ) grid of blocks. Each block contains + * (\p blockDimX x \p blockDimY x \p blockDimZ) threads. + * + * \p sharedMemBytes sets the amount of dynamic shared memory that will be + * available to each thread block. + * + * Kernel parameters to \p func can be specified in one of two ways: + * + * 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N + * parameters, then \p kernelParams needs to be an array of N pointers. Each pointer, + * from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual + * parameter will be copied. The number of kernel parameters and their offsets and sizes do not need + * to be specified as that information is retrieved directly from the kernel's image. 
+ * + * 2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single + * buffer that is passed in via \p extra. This places the burden on the application of knowing each + * kernel parameter's size and alignment/padding within the buffer. The \p extra parameter exists + * to allow this function to take additional less commonly used arguments. \p extra specifies + * a list of names of extra settings and their corresponding values. Each extra setting name is + * immediately followed by the corresponding value. The list must be terminated with either NULL or + * CU_LAUNCH_PARAM_END. + * + * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + * array; + * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + * value in \p extra will be a pointer to a buffer + * containing all the kernel parameters for launching kernel + * \p func; + * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + * value in \p extra will be a pointer to a size_t + * containing the size of the buffer specified with + * ::CU_LAUNCH_PARAM_BUFFER_POINTER; + * + * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both + * \p kernelParams and \p extra (i.e. both \p kernelParams and \p extra are non-NULL). + * ::CUDA_ERROR_INVALID_VALUE will be returned if \p extra is used for a cooperative kernel. + * + * The \p kernelParams or \p extra array, as well as the argument values it points to, + * are copied during this call. + * + * \note Kernels launched using graphs must not use texture and surface references. Reading or + * writing through any texture or surface reference is undefined behavior. + * This restriction does not apply to texture and surface objects. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the GPU execution node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuLaunchCooperativeKernel, + * ::cuGraphKernelNodeGetParams, + * ::cuGraphKernelNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a kernel node's parameters + * + * Returns the parameters of kernel node \p hNode in \p nodeParams. + * The \p kernelParams or \p extra array returned in \p nodeParams, + * as well as the argument values it points to, are owned by the node. + * This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphKernelNodeSetParams to update the + * parameters of this node. + * + * The params will contain either \p kernelParams or \p extra, + * according to which of these was most recently set on the node. 
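+ *
+ * A minimal sketch (error checking omitted) of adding a kernel node via
+ * \p kernelParams and reading its parameters back, assuming \p hGraph is a valid
+ * ::CUgraph, \p hKernel is a loaded ::CUfunction taking a ::CUdeviceptr and an
+ * \p int, and \p d_data / \p n were set up elsewhere:
+ * \code
+ * CUDA_KERNEL_NODE_PARAMS np = {0};
+ * np.func = hKernel;
+ * np.gridDimX  = 64;  np.gridDimY  = 1; np.gridDimZ  = 1;
+ * np.blockDimX = 256; np.blockDimY = 1; np.blockDimZ = 1;
+ * np.sharedMemBytes = 0;
+ * void *args[] = { &d_data, &n };          // one pointer per kernel parameter
+ * np.kernelParams = args;
+ * np.extra = NULL;
+ *
+ * CUgraphNode kernelNode;
+ * cuGraphAddKernelNode(&kernelNode, hGraph, NULL, 0, &np);   // no dependencies: root node
+ *
+ * CUDA_KERNEL_NODE_PARAMS queried;
+ * cuGraphKernelNodeGetParams(kernelNode, &queried);  // queried.kernelParams is owned by the node
+ * \endcode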
+ * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddKernelNode, + * ::cuGraphKernelNodeSetParams + */ +CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Sets a kernel node's parameters + * + * Sets the parameters of kernel node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddKernelNode, + * ::cuGraphKernelNodeGetParams + */ +CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a memcpy node and adds it to a graph + * + * Creates a new memcpy node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * When the graph is launched, the node will perform the memcpy described by \p copyParams. + * See ::cuMemcpy3D() for a description of the structure and its restrictions. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer + * to managed memory, then using the memory type ::CU_MEMORYTYPE_UNIFIED is disallowed + * for those operand(s). The managed memory will be treated as residing on either the + * host or the device, depending on which memory type is specified. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param copyParams - Parameters for the memory copy + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemcpy3D, + * ::cuGraphMemcpyNodeGetParams, + * ::cuGraphMemcpyNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddMemcpyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D *copyParams, CUcontext ctx); + +/** + * \brief Returns a memcpy node's parameters + * + * Returns the parameters of memcpy node \p hNode in \p nodeParams. 
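+ *
+ * A minimal sketch (error checking omitted) of adding a 1D device-to-device copy
+ * and querying it back, assuming \p hGraph is a valid ::CUgraph and \p d_src,
+ * \p d_dst and \p numBytes describe two device allocations made elsewhere:
+ * \code
+ * CUDA_MEMCPY3D cp = {0};
+ * cp.srcMemoryType = CU_MEMORYTYPE_DEVICE;
+ * cp.srcDevice     = d_src;
+ * cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+ * cp.dstDevice     = d_dst;
+ * cp.WidthInBytes  = numBytes;
+ * cp.Height        = 1;
+ * cp.Depth         = 1;
+ *
+ * CUcontext ctx;
+ * cuCtxGetCurrent(&ctx);
+ * CUgraphNode copyNode;
+ * cuGraphAddMemcpyNode(&copyNode, hGraph, NULL, 0, &cp, ctx);
+ *
+ * CUDA_MEMCPY3D queried;
+ * cuGraphMemcpyNodeGetParams(copyNode, &queried);
+ * \endcode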
+ * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemcpy3D, + * ::cuGraphAddMemcpyNode, + * ::cuGraphMemcpyNodeSetParams + */ +CUresult CUDAAPI cuGraphMemcpyNodeGetParams(CUgraphNode hNode, CUDA_MEMCPY3D *nodeParams); + +/** + * \brief Sets a memcpy node's parameters + * + * Sets the parameters of memcpy node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemcpy3D, + * ::cuGraphAddMemcpyNode, + * ::cuGraphMemcpyNodeGetParams + */ +CUresult CUDAAPI cuGraphMemcpyNodeSetParams(CUgraphNode hNode, const CUDA_MEMCPY3D *nodeParams); + +/** + * \brief Creates a memset node and adds it to a graph + * + * Creates a new memset node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The element size must be 1, 2, or 4 bytes. + * When the graph is launched, the node will perform the memset described by \p memsetParams. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param memsetParams - Parameters for the memory set + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_CONTEXT + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemsetD2D32, + * ::cuGraphMemsetNodeGetParams, + * ::cuGraphMemsetNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode + */ +CUresult CUDAAPI cuGraphAddMemsetNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx); + +/** + * \brief Returns a memset node's parameters + * + * Returns the parameters of memset node \p hNode in \p nodeParams. + * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemsetD2D32, + * ::cuGraphAddMemsetNode, + * ::cuGraphMemsetNodeSetParams + */ +CUresult CUDAAPI cuGraphMemsetNodeGetParams(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS *nodeParams); + +/** + * \brief Sets a memset node's parameters + * + * Sets the parameters of memset node \p hNode to \p nodeParams. 
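+ *
+ * A minimal sketch (error checking omitted) of creating a memset node and later
+ * updating only its fill value, assuming \p hGraph is a valid ::CUgraph and
+ * \p d_dst / \p numElems describe a device allocation of 4-byte elements:
+ * \code
+ * CUDA_MEMSET_NODE_PARAMS mp = {0};
+ * mp.dst         = d_dst;
+ * mp.value       = 0;          // fill pattern
+ * mp.elementSize = 4;          // must be 1, 2, or 4 bytes
+ * mp.width       = numElems;   // elements per row
+ * mp.height      = 1;          // pitch is not used when height is 1
+ *
+ * CUcontext ctx;
+ * cuCtxGetCurrent(&ctx);
+ * CUgraphNode memsetNode;
+ * cuGraphAddMemsetNode(&memsetNode, hGraph, NULL, 0, &mp, ctx);
+ *
+ * mp.value = 0xFF;                                 // change only the fill value
+ * cuGraphMemsetNodeSetParams(memsetNode, &mp);
+ * \endcode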
+ * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemsetD2D32, + * ::cuGraphAddMemsetNode, + * ::cuGraphMemsetNodeGetParams + */ +CUresult CUDAAPI cuGraphMemsetNodeSetParams(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a host execution node and adds it to a graph + * + * Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * When the graph is launched, the node will invoke the specified CPU function. + * Host nodes are not supported under MPS with pre-Volta GPUs. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the host node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchHostFunc, + * ::cuGraphHostNodeGetParams, + * ::cuGraphHostNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddHostNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a host node's parameters + * + * Returns the parameters of host node \p hNode in \p nodeParams. + * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchHostFunc, + * ::cuGraphAddHostNode, + * ::cuGraphHostNodeSetParams + */ +CUresult CUDAAPI cuGraphHostNodeGetParams(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Sets a host node's parameters + * + * Sets the parameters of host node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchHostFunc, + * ::cuGraphAddHostNode, + * ::cuGraphHostNodeGetParams + */ +CUresult CUDAAPI cuGraphHostNodeSetParams(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a child graph node and adds it to a graph + * + * Creates a new node which executes an embedded graph, and adds it to \p hGraph with + * \p numDependencies dependencies specified via \p dependencies. 
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * If \p hGraph contains allocation or free nodes, this call will return an error. + * + * The node executes an embedded child graph. The child graph is cloned in this call. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param childGraph - The graph to clone into this node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphChildGraphNodeGetGraph, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode, + * ::cuGraphClone + */ +CUresult CUDAAPI cuGraphAddChildGraphNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph); + +/** + * \brief Gets a handle to the embedded graph of a child graph node + * + * Gets a handle to the embedded graph in a child graph node. This call + * does not clone the graph. Changes to the graph will be reflected in + * the node, and the node retains ownership of the graph. + * + * Allocation and free nodes cannot be added to the returned graph. + * Attempting to do so will return an error. + * + * \param hNode - Node to get the embedded graph for + * \param phGraph - Location to store a handle to the graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphNodeFindInClone + */ +CUresult CUDAAPI cuGraphChildGraphNodeGetGraph(CUgraphNode hNode, CUgraph *phGraph); + +/** + * \brief Creates an empty node and adds it to a graph + * + * Creates a new node which performs no operation, and adds it to \p hGraph with + * \p numDependencies dependencies specified via \p dependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * An empty node performs no operation during execution, but can be used for + * transitive ordering. For example, a phased execution graph with 2 groups of n + * nodes with a barrier between them can be represented using an empty node and + * 2*n dependency edges, rather than no empty node and n^2 dependency edges. 
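+ *
+ * As an illustrative sketch, assuming \p firstGroup holds \p n nodes already
+ * added to \p hGraph, a barrier between two phases can be expressed as:
+ * \code
+ * CUgraphNode barrier;
+ * cuGraphAddEmptyNode(&barrier, hGraph, firstGroup, n);
+ * // every node of the second phase is then added with the barrier as its only
+ * // dependency, e.g. cuGraphAddKernelNode(&node, hGraph, &barrier, 1, &params)
+ * \endcode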
+ * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddEmptyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies); + +/** + * \brief Creates an event record node and adds it to a graph + * + * Creates a new event record node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * Each launch of the graph will record \p event to capture execution of the + * node's dependencies. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddEventRecordNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); + +/** + * \brief Returns the event associated with an event record node + * + * Returns the event of event record node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuGraphEventRecordNodeSetEvent, + * ::cuGraphEventWaitNodeGetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventRecordNodeGetEvent(CUgraphNode hNode, CUevent *event_out); + +/** + * \brief Sets an event record node's event + * + * Sets the event of event record node \p hNode to \p event. 
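+ *
+ * A minimal sketch (error checking omitted), assuming \p hGraph is a valid
+ * ::CUgraph and the events are created in the current context:
+ * \code
+ * CUevent evA, evB;
+ * cuEventCreate(&evA, CU_EVENT_DEFAULT);
+ * cuEventCreate(&evB, CU_EVENT_DEFAULT);
+ *
+ * CUgraphNode recordNode;
+ * cuGraphAddEventRecordNode(&recordNode, hGraph, NULL, 0, evA);
+ *
+ * // later: make the node record a different event instead
+ * cuGraphEventRecordNodeSetEvent(recordNode, evB);
+ * \endcode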
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuGraphEventRecordNodeGetEvent, + * ::cuGraphEventWaitNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventRecordNodeSetEvent(CUgraphNode hNode, CUevent event); + +/** + * \brief Creates an event wait node and adds it to a graph + * + * Creates a new event wait node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The graph node will wait for all work captured in \p event. See ::cuEventRecord() + * for details on what is captured by an event. \p event may be from a different context + * or device than the launch stream. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddEventWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); + +/** + * \brief Returns the event associated with an event wait node + * + * Returns the event of event wait node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuGraphEventWaitNodeSetEvent, + * ::cuGraphEventRecordNodeGetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventWaitNodeGetEvent(CUgraphNode hNode, CUevent *event_out); + +/** + * \brief Sets an event wait node's event + * + * Sets the event of event wait node \p hNode to \p event. 
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuGraphEventWaitNodeGetEvent, + * ::cuGraphEventRecordNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventWaitNodeSetEvent(CUgraphNode hNode, CUevent event); + +/** + * \brief Creates an external semaphore signal node and adds it to a graph + * + * Creates a new external semaphore signal node and adds it to \p hGraph with \p + * numDependencies dependencies specified via \p dependencies and arguments specified + * in \p nodeParams. It is possible for \p numDependencies to be 0, in which case the + * node will be placed at the root of the graph. \p dependencies may not have any + * duplicate entries. A handle to the new node will be returned in \p phGraphNode. + * + * Performs a signal operation on a set of externally allocated semaphore objects + * when the node is launched. The operation(s) will occur after all of the node's + * dependencies have completed. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphExternalSemaphoresSignalNodeGetParams, + * ::cuGraphExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddExternalSemaphoresSignalNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); + +/** + * \brief Returns an external semaphore signal node's parameters + * + * Returns the parameters of an external semaphore signal node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. 
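+ *
+ * A minimal sketch (error checking omitted), assuming \p hGraph is a valid
+ * ::CUgraph and \p extSem was previously imported with
+ * ::cuImportExternalSemaphore (a fence-style semaphore is assumed here):
+ * \code
+ * CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS sig = {0};
+ * sig.params.fence.value = 1;                 // meaningful for fence-based semaphore types
+ *
+ * CUDA_EXT_SEM_SIGNAL_NODE_PARAMS np = {0};
+ * np.extSemArray = &extSem;
+ * np.paramsArray = &sig;
+ * np.numExtSems  = 1;
+ *
+ * CUgraphNode sigNode;
+ * cuGraphAddExternalSemaphoresSignalNode(&sigNode, hGraph, NULL, 0, &np);
+ *
+ * CUDA_EXT_SEM_SIGNAL_NODE_PARAMS out;
+ * cuGraphExternalSemaphoresSignalNodeGetParams(sigNode, &out);  // out.extSemArray is owned by the node
+ * \endcode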
+ * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphExternalSemaphoresSignalNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresSignalNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *params_out); + +/** + * \brief Sets an external semaphore signal node's parameters + * + * Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphExternalSemaphoresSignalNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresSignalNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); + +/** + * \brief Creates an external semaphore wait node and adds it to a graph + * + * Creates a new external semaphore wait node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p phGraphNode. + * + * Performs a wait operation on a set of externally allocated semaphore objects + * when the node is launched. The node's dependencies will not be launched until + * the wait operation has completed. 
+ * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphExternalSemaphoresWaitNodeGetParams, + * ::cuGraphExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddExternalSemaphoresWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); + +/** + * \brief Returns an external semaphore wait node's parameters + * + * Returns the parameters of an external semaphore wait node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. + * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphExternalSemaphoresWaitNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresWaitNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS *params_out); + +/** + * \brief Sets an external semaphore wait node's parameters + * + * Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphExternalSemaphoresWaitNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresWaitNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a batch memory operation node and adds it to a graph + * + * Creates a new batch memory operation node and adds it to \p hGraph with \p + * numDependencies dependencies specified via \p dependencies and arguments specified in \p nodeParams. 
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * When the node is added, the paramArray inside \p nodeParams is copied and therefore it can be + * freed after the call returns. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. For more + * information, see the Stream Memory Operations section in the programming + * guide(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html). + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuStreamWaitValue32, + * ::cuStreamWriteValue32, + * ::cuStreamWaitValue64, + * ::cuStreamWriteValue64, + * ::cuGraphBatchMemOpNodeGetParams, + * ::cuGraphBatchMemOpNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddBatchMemOpNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a batch mem op node's parameters + * + * Returns the parameters of batch mem op node \p hNode in \p nodeParams_out. + * The \p paramArray returned in \p nodeParams_out is owned by the node. + * This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphBatchMemOpNodeSetParams to update the + * parameters of this node. + * + * \param hNode - Node to get the parameters for + * \param nodeParams_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuGraphAddBatchMemOpNode, + * ::cuGraphBatchMemOpNodeSetParams + */ +CUresult CUDAAPI cuGraphBatchMemOpNodeGetParams(CUgraphNode hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out); + +/** + * \brief Sets a batch mem op node's parameters + * + * Sets the parameters of batch mem op node \p hNode to \p nodeParams. + * + * The paramArray inside \p nodeParams is copied and therefore it can be + * freed after the call returns. 
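+ *
+ * A minimal sketch (error checking omitted), assuming \p hGraph is a valid
+ * ::CUgraph and \p d_flag / \p d_done are device addresses suitable for stream
+ * memory operations, set up elsewhere:
+ * \code
+ * CUstreamBatchMemOpParams ops[2];
+ * memset(ops, 0, sizeof(ops));                       // string.h assumed
+ * ops[0].operation          = CU_STREAM_MEM_OP_WAIT_VALUE_32;
+ * ops[0].waitValue.address  = d_flag;
+ * ops[0].waitValue.value    = 1;
+ * ops[0].waitValue.flags    = CU_STREAM_WAIT_VALUE_GEQ;
+ * ops[1].operation          = CU_STREAM_MEM_OP_WRITE_VALUE_32;
+ * ops[1].writeValue.address = d_done;
+ * ops[1].writeValue.value   = 1;
+ *
+ * CUDA_BATCH_MEM_OP_NODE_PARAMS bp = {0};
+ * cuCtxGetCurrent(&bp.ctx);
+ * bp.count      = 2;
+ * bp.paramArray = ops;                               // copied by the call; may be freed afterwards
+ *
+ * CUgraphNode memOpNode;
+ * cuGraphAddBatchMemOpNode(&memOpNode, hGraph, NULL, 0, &bp);
+ *
+ * ops[0].waitValue.value = 2;                        // later: raise the wait threshold
+ * cuGraphBatchMemOpNodeSetParams(memOpNode, &bp);
+ * \endcode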
+ * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuGraphAddBatchMemOpNode, + * ::cuGraphBatchMemOpNodeGetParams + */ +CUresult CUDAAPI cuGraphBatchMemOpNodeSetParams(CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); + +/** + * \brief Sets the parameters for a batch mem op node in the given graphExec + * + * Sets the parameters of a batch mem op node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * The following fields on operations may be modified on an executable graph: + * + * op.waitValue.address + * op.waitValue.value[64] + * op.waitValue.flags bits corresponding to wait type (i.e. CU_STREAM_WAIT_VALUE_FLUSH bit cannot be modified) + * op.writeValue.address + * op.writeValue.value[64] + * + * Other fields, such as the context, count or type of operations, and other types of operations such as membars, + * may not be modified. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * The paramArray inside \p nodeParams is copied and therefore it can be + * freed after the call returns. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Batch mem op node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuGraphAddBatchMemOpNode, + * ::cuGraphBatchMemOpNodeGetParams, + * ::cuGraphBatchMemOpNodeSetParams, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecBatchMemOpNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); + +/** + * \brief Creates an allocation node and adds it to a graph + * + * Creates a new allocation node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p phGraphNode. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * When ::cuGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in + * \p nodeParams.dptr. The allocation's address remains fixed across instantiations and launches. + * + * If the allocation is freed in the same graph, by creating a free node using ::cuGraphAddMemFreeNode, + * the allocation can be accessed by nodes ordered after the allocation node but before the free node. 
+ * These allocations cannot be freed outside the owning graph, and they can only be freed once in the + * owning graph. + * + * If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the + * graph which are ordered after the allocation node, but also by stream operations ordered after the + * graph's execution but before the allocation is freed. + * + * Allocations which are not freed in the same graph can be freed by: + * - passing the allocation to ::cuMemFreeAsync or ::cuMemFree; + * - launching a graph with a free node for that allocation; or + * - specifying ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH during instantiation, which makes + * each launch behave as though it called ::cuMemFreeAsync for every unfreed allocation. + * + * It is not possible to free an allocation in both the owning graph and another graph. If the allocation + * is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed + * in another graph, a free node can no longer be added to the owning graph. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemFreeNode, + * ::cuGraphMemAllocNodeGetParams, + * ::cuDeviceGraphMemTrim, + * ::cuDeviceGetGraphMemAttribute, + * ::cuDeviceSetGraphMemAttribute, + * ::cuMemAllocAsync, + * ::cuMemFreeAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddMemAllocNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a memory alloc node's parameters + * + * Returns the parameters of a memory alloc node \p hNode in \p params_out. + * The \p poolProps and \p accessDescs returned in \p params_out, are owned by the + * node. This memory remains valid until the node is destroyed. The returned + * parameters must not be modified. + * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemAllocNode, + * ::cuGraphMemFreeNodeGetParams + */ +CUresult CUDAAPI cuGraphMemAllocNodeGetParams(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS *params_out); + +/** + * \brief Creates a memory free node and adds it to a graph + * + * Creates a new memory free node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. 
\p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p phGraphNode. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dptr - Address of memory to free + * + * ::cuGraphAddMemFreeNode will return ::CUDA_ERROR_INVALID_VALUE if the user attempts to free: + * - an allocation twice in the same graph. + * - an address that was not returned by an allocation node. + * - an invalid address. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemAllocNode, + * ::cuGraphMemFreeNodeGetParams, + * ::cuDeviceGraphMemTrim, + * ::cuDeviceGetGraphMemAttribute, + * ::cuDeviceSetGraphMemAttribute, + * ::cuMemAllocAsync, + * ::cuMemFreeAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddMemFreeNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUdeviceptr dptr); + +/** + * \brief Returns a memory free node's parameters + * + * Returns the address of a memory free node \p hNode in \p dptr_out. + * + * \param hNode - Node to get the parameters for + * \param dptr_out - Pointer to return the device address + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemFreeNode, + * ::cuGraphMemAllocNodeGetParams + */ +CUresult CUDAAPI cuGraphMemFreeNodeGetParams(CUgraphNode hNode, CUdeviceptr *dptr_out); + +/** + * \brief Free unused memory that was cached on the specified device for use with graphs back to the OS. + * + * Blocks which are not in use by a graph that is either currently executing or scheduled to execute are + * freed back to the operating system. + * + * \param device - The device for which cached memory should be freed. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa + * ::cuGraphAddMemAllocNode, + * ::cuGraphAddMemFreeNode, + * ::cuDeviceSetGraphMemAttribute, + * ::cuDeviceGetGraphMemAttribute + */ +CUresult CUDAAPI cuDeviceGraphMemTrim(CUdevice device); + +/** + * \brief Query asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: Amount of memory, in bytes, currently associated with graphs + * - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. 
+ * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: Amount of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - retrieved value + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa + * ::cuDeviceSetGraphMemAttribute, + * ::cuGraphAddMemAllocNode, + * ::cuGraphAddMemFreeNode + */ +CUresult CUDAAPI cuDeviceGetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value); + +/** + * \brief Set asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - pointer to value to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa + * ::cuDeviceGetGraphMemAttribute, + * ::cuGraphAddMemAllocNode, + * ::cuGraphAddMemFreeNode + */ +CUresult CUDAAPI cuDeviceSetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value); + +/** + * \brief Clones a graph + * + * This function creates a copy of \p originalGraph and returns it in \p phGraphClone. + * All parameters are copied into the cloned graph. The original graph may be modified + * after this call without affecting the clone. + * + * Child graph nodes in the original graph are recursively copied into the clone. + * + * \param phGraphClone - Returns newly created cloned graph + * \param originalGraph - Graph to clone + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphNodeFindInClone + */ +CUresult CUDAAPI cuGraphClone(CUgraph *phGraphClone, CUgraph originalGraph); + +/** + * \brief Finds a cloned version of a node + * + * This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode + * in the original graph. + * + * \p hClonedGraph must have been cloned from \p hOriginalGraph via ::cuGraphClone. + * \p hOriginalNode must have been in \p hOriginalGraph at the time of the call to + * ::cuGraphClone, and the corresponding cloned node in \p hClonedGraph must not have + * been removed. The cloned node is then returned via \p phClonedNode. + * + * \param phNode - Returns handle to the cloned node + * \param hOriginalNode - Handle to the original node + * \param hClonedGraph - Cloned graph to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphClone + */ +CUresult CUDAAPI cuGraphNodeFindInClone(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph); + +/** + * \brief Returns a node's type + * + * Returns the node type of \p hNode in \p type. 
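+ *
+ * A minimal sketch, assuming \p node is a valid ::CUgraphNode:
+ * \code
+ * CUgraphNodeType type;
+ * cuGraphNodeGetType(node, &type);
+ * if (type == CU_GRAPH_NODE_TYPE_KERNEL) {
+ *     CUDA_KERNEL_NODE_PARAMS kp;
+ *     cuGraphKernelNodeGetParams(node, &kp);
+ * }
+ * \endcode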
+ * + * \param hNode - Node to query + * \param type - Pointer to return the node type + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphChildGraphNodeGetGraph, + * ::cuGraphKernelNodeGetParams, + * ::cuGraphKernelNodeSetParams, + * ::cuGraphHostNodeGetParams, + * ::cuGraphHostNodeSetParams, + * ::cuGraphMemcpyNodeGetParams, + * ::cuGraphMemcpyNodeSetParams, + * ::cuGraphMemsetNodeGetParams, + * ::cuGraphMemsetNodeSetParams + */ +CUresult CUDAAPI cuGraphNodeGetType(CUgraphNode hNode, CUgraphNodeType *type); + +/** + * \brief Returns a graph's nodes + * + * Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this + * function will return the number of nodes in \p numNodes. Otherwise, + * \p numNodes entries will be filled in. If \p numNodes is higher than the actual + * number of nodes, the remaining entries in \p nodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numNodes. + * + * \param hGraph - Graph to query + * \param nodes - Pointer to return the nodes + * \param numNodes - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetType, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphGetNodes(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes); + +/** + * \brief Returns a graph's root nodes + * + * Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this + * function will return the number of root nodes in \p numRootNodes. Otherwise, + * \p numRootNodes entries will be filled in. If \p numRootNodes is higher than the actual + * number of root nodes, the remaining entries in \p rootNodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numRootNodes. + * + * \param hGraph - Graph to query + * \param rootNodes - Pointer to return the root nodes + * \param numRootNodes - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphGetNodes, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetType, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphGetRootNodes(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes); + +/** + * \brief Returns a graph's dependency edges + * + * Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding + * indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the + * node in \p from[i]. \p from and \p to may both be NULL, in which + * case this function only returns the number of edges in \p numEdges. Otherwise, + * \p numEdges entries will be filled in. If \p numEdges is higher than the actual + * number of edges, the remaining entries in \p from and \p to will be set to NULL, and + * the number of edges actually returned will be written to \p numEdges. 
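+ *
+ * A minimal sketch of the usual two-call pattern for sizing the output arrays
+ * (error checking omitted; malloc/free from stdlib.h), assuming \p hGraph is a
+ * valid ::CUgraph:
+ * \code
+ * size_t numEdges = 0;
+ * cuGraphGetEdges(hGraph, NULL, NULL, &numEdges);      // first call: query the edge count
+ * CUgraphNode *from = (CUgraphNode *)malloc(numEdges * sizeof(CUgraphNode));
+ * CUgraphNode *to   = (CUgraphNode *)malloc(numEdges * sizeof(CUgraphNode));
+ * cuGraphGetEdges(hGraph, from, to, &numEdges);        // second call: fill both arrays
+ * // to[i] depends on from[i] for each i < numEdges
+ * free(from);
+ * free(to);
+ * \endcode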
+ * + * \param hGraph - Graph to get the edges from + * \param from - Location to return edge endpoints + * \param to - Location to return edge endpoints + * \param numEdges - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphAddDependencies, + * ::cuGraphRemoveDependencies, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphGetEdges(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges); + +/** + * \brief Returns a node's dependencies + * + * Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this + * function will return the number of dependencies in \p numDependencies. Otherwise, + * \p numDependencies entries will be filled in. If \p numDependencies is higher than the actual + * number of dependencies, the remaining entries in \p dependencies will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numDependencies. + * + * \param hNode - Node to query + * \param dependencies - Pointer to return the dependencies + * \param numDependencies - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeGetDependentNodes, + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphAddDependencies, + * ::cuGraphRemoveDependencies + */ +CUresult CUDAAPI cuGraphNodeGetDependencies(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies); + +/** + * \brief Returns a node's dependent nodes + * + * Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which + * case this function will return the number of dependent nodes in \p numDependentNodes. + * Otherwise, \p numDependentNodes entries will be filled in. If \p numDependentNodes is + * higher than the actual number of dependent nodes, the remaining entries in + * \p dependentNodes will be set to NULL, and the number of nodes actually obtained will + * be returned in \p numDependentNodes. + * + * \param hNode - Node to query + * \param dependentNodes - Pointer to return the dependent nodes + * \param numDependentNodes - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeGetDependencies, + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphAddDependencies, + * ::cuGraphRemoveDependencies + */ +CUresult CUDAAPI cuGraphNodeGetDependentNodes(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes); + +/** + * \brief Adds dependency edges to a graph + * + * The number of dependencies to be added is defined by \p numDependencies + * Elements in \p from and \p to at corresponding indices define a dependency. + * Each node in \p from and \p to must belong to \p hGraph. + * + * If \p numDependencies is 0, elements in \p from and \p to will be ignored. + * Specifying an existing dependency will return an error. 
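+ *
+ * For illustration, a minimal sketch adding a single edge (assumes
+ * \p producerNode and \p consumerNode are existing nodes of \p hGraph;
+ * error checking is omitted):
+ * \code
+ * CUgraphNode from[1] = { producerNode };   // node that must execute first
+ * CUgraphNode to[1]   = { consumerNode };   // node that depends on it
+ * cuGraphAddDependencies(hGraph, from, to, 1);
+ * \endcode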
+ * + * \param hGraph - Graph to which dependencies are added + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be added + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphRemoveDependencies, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphAddDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); + +/** + * \brief Removes dependency edges from a graph + * + * The number of \p dependencies to be removed is defined by \p numDependencies. + * Elements in \p from and \p to at corresponding indices define a dependency. + * Each node in \p from and \p to must belong to \p hGraph. + * + * If \p numDependencies is 0, elements in \p from and \p to will be ignored. + * Specifying a non-existing dependency will return an error. + * + * Dependencies cannot be removed from graphs which contain allocation or free nodes. + * Any attempt to do so will return an error. + * + * \param hGraph - Graph from which to remove dependencies + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be removed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddDependencies, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphRemoveDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); + +/** + * \brief Remove a node from the graph + * + * Removes \p hNode from its graph. This operation also severs any dependencies of other nodes + * on \p hNode and vice versa. + * + * Nodes which belong to a graph which contains allocation or free nodes cannot be destroyed. + * Any attempt to do so will return an error. + * + * \param hNode - Node to remove + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphDestroyNode(CUgraphNode hNode); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p hGraph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p phGraphExec. + * + * The \p flags parameter controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. 
This flag cannot be used in + * conjunction with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p hGraph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to instantiate + * a second executable graph before destroying the first with ::cuGraphExecDestroy + * will result in an error. + * + * If \p hGraph contains kernels which call device-side cudaGraphLaunch() from multiple + * contexts, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single context. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current context, and the current context must + * match the context of other nodes in the graph. + * + * \param phGraphExec - Returns instantiated graph + * \param hGraph - Graph to instantiate + * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphCreate, + * ::cuGraphUpload, + * ::cuGraphLaunch, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p hGraph as an executable graph according to the \p instantiateParams structure. + * The graph is validated for any structural constraints or intra-node constraints + * which were not previously validated. If instantiation is successful, a handle to + * the instantiated graph is returned in \p phGraphExec. + * + * \p instantiateParams controls the behavior of instantiation and subsequent + * graph launches, as well as returning more detailed information in the event of an error. + * ::CUDA_GRAPH_INSTANTIATE_PARAMS is defined as: + * + * \code + typedef struct { + cuuint64_t flags; + CUstream hUploadStream; + CUgraphNode hErrNode_out; + CUgraphInstantiateResult result_out; + } CUDA_GRAPH_INSTANTIATE_PARAMS; + * \endcode + * + * The \p flags field controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD, which will perform an upload of the graph + * into \p hUploadStream once the graph has been instantiated. 
+ * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. This flag cannot be used in + * conjunction with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p hGraph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to instantiate a + * second executable graph before destroying the first with ::cuGraphExecDestroy will + * result in an error. + * + * If \p hGraph contains kernels which call device-side cudaGraphLaunch() from multiple + * contexts, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single context. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current context, and the current context must + * match the context of other nodes in the graph. + * + * In the event of an error, the \p result_out and \p hErrNode_out fields will contain more + * information about the nature of the error. Possible error reporting includes: + * + * - ::CUDA_GRAPH_INSTANTIATE_ERROR, if passed an invalid value or if an unexpected error occurred + * which is described by the return value of the function. \p hErrNode_out will be set to NULL. + * - ::CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE, if the graph structure is invalid. \p hErrNode_out + * will be set to one of the offending nodes. + * - ::CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED, if the graph is instantiated for device + * launch but contains a node of an unsupported node type, or a node which performs unsupported + * operations, such as use of CUDA dynamic parallelism within a kernel node. \p hErrNode_out will + * be set to this node. + * - ::CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED, if the graph is instantiated for device + * launch but a node’s context differs from that of another node. This error can also be returned + * if a graph is not instantiated for device launch and it contains kernels which call device-side + * cudaGraphLaunch() from multiple contexts. \p hErrNode_out will be set to this node. + * + * If instantiation is successful, \p result_out will be set to ::CUDA_GRAPH_INSTANTIATE_SUCCESS, + * and \p hErrNode_out will be set to NULL. 
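+ *
+ * For illustration, a minimal sketch (assumes \p hGraph is a valid graph;
+ * error handling is reduced to a single status test):
+ * \code
+ * CUDA_GRAPH_INSTANTIATE_PARAMS params = { 0 };
+ * params.flags = CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH;
+ * CUgraphExec graphExec;
+ * if (cuGraphInstantiateWithParams(&graphExec, hGraph, &params) != CUDA_SUCCESS) {
+ *     // params.result_out and params.hErrNode_out describe what went wrong
+ * }
+ * \endcode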
+ * + * \param phGraphExec - Returns instantiated graph + * \param hGraph - Graph to instantiate + * \param instantiateParams - Instantiation parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphInstantiate, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphInstantiateWithParams(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); + +/** + * \brief Query the instantiation flags of an executable graph + * + * Returns the flags that were passed to instantiation for the given executable graph. + * ::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD will not be returned by this API as it does + * not affect the resulting executable graph. + * + * \param hGraphExec - The executable graph to query + * \param flags - Returns the instantiation flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphInstantiateWithParams + */ +CUresult CUDAAPI cuGraphExecGetFlags(CUgraphExec hGraphExec, cuuint64_t *flags); + +/** + * \brief Sets the parameters for a kernel node in the given graphExec + * + * Sets the parameters of a kernel node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. All \p nodeParams + * fields may change, but the following restrictions apply to \p func updates: + * + * - The owning context of the function cannot change. + * - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + * to a function which uses CDP + * - If \p hGraphExec was not instantiated for device launch, a node whose function originally + * did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + * device-side cudaGraphLaunch() unless the node resides on the same context as nodes which + * contained such calls at instantiate-time. If no such calls were present at instantiation, + * these updates cannot be performed at all. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - kernel node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddKernelNode, + * ::cuGraphKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec. 
+ * + * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + * contained \p copyParams at instantiation. hNode must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + * + * The source and destination memory in \p copyParams must be allocated from the same + * contexts as the original source and destination memory. Both the instantiation-time + * memory operands and the memory operands in \p copyParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. hNode is also + * not modified by this call. + * + * Returns CUDA_ERROR_INVALID_VALUE if the memory operands' mappings changed or + * either the original or new memory operands are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Memcpy node from the graph which was used to instantiate graphExec + * \param copyParams - The updated parameters to set + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemcpyNode, + * ::cuGraphMemcpyNodeSetParams, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecMemcpyNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D *copyParams, CUcontext ctx); + +/** + * \brief Sets the parameters for a memset node in the given graphExec. + * + * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + * contained \p memsetParams at instantiation. hNode must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + * + * The destination memory in \p memsetParams must be allocated from the same + * contexts as the original destination memory. Both the instantiation-time + * memory operand and the memory operand in \p memsetParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. hNode is also + * not modified by this call. + * + * Returns CUDA_ERROR_INVALID_VALUE if the memory operand's mappings changed or + * either the original or new memory operand are multidimensional. 
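+ *
+ * For illustration, a minimal sketch that changes only the fill value
+ * (assumes \p origParams holds the ::CUDA_MEMSET_NODE_PARAMS used when the
+ * node was created and \p ctx is the node's context; error checking is
+ * omitted):
+ * \code
+ * CUDA_MEMSET_NODE_PARAMS memsetParams = origParams;  // keep the 1D destination unchanged
+ * memsetParams.value = 0xFF;                          // new fill value for future launches
+ * cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, &memsetParams, ctx);
+ * \endcode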
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Memset node from the graph which was used to instantiate graphExec + * \param memsetParams - The updated parameters to set + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemsetNode, + * ::cuGraphMemsetNodeSetParams, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecMemsetNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx); + +/** + * \brief Sets the parameters for a host node in the given graphExec. + * + * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + * contained \p nodeParams at instantiation. hNode must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. hNode is also + * not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Host node from the graph which was used to instantiate graphExec + * \param nodeParams - The updated parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddHostNode, + * ::cuGraphHostNodeSetParams, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecHostNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Updates node parameters in the child graph node in the given graphExec. + * + * Updates the work represented by \p hNode in \p hGraphExec as though the nodes contained + * in \p hNode's graph had the parameters contained in \p childGraph's nodes at instantiation. + * \p hNode must remain in the graph which was used to instantiate \p hGraphExec. + * Changed edges to and from \p hNode are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p hNode is also + * not modified by this call. + * + * The topology of \p childGraph, as well as the node insertion order, must match that + * of the graph contained in \p hNode. See ::cuGraphExecUpdate() for a list of restrictions + * on what can be updated in an instantiated graph. The update is recursive, so child graph + * nodes contained within the top level child graph will also be updated. 
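+ *
+ * For illustration, a minimal sketch (assumes \p updatedChildGraph has the
+ * same topology and node insertion order as the graph originally embedded in
+ * \p hNode; error checking is omitted):
+ * \code
+ * // Only node parameters are taken from updatedChildGraph; the topology must match.
+ * cuGraphExecChildGraphNodeSetParams(hGraphExec, hNode, updatedChildGraph);
+ * \endcode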
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Host node from the graph which was used to instantiate graphExec + * \param childGraph - The graph supplying the updated parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphChildGraphNodeGetGraph, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecChildGraphNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph); + +/** + * \brief Sets the event for an event record node in the given graphExec + * + * Sets the event of an event record node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - event record node from the graph from which graphExec was instantiated + * \param event - Updated event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuGraphEventRecordNodeGetEvent, + * ::cuGraphEventWaitNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecEventRecordNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); + +/** + * \brief Sets the event for an event wait node in the given graphExec + * + * Sets the event of an event wait node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. 
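+ *
+ * For illustration, a minimal sketch that retargets the wait to a freshly
+ * created event (error checking is omitted):
+ * \code
+ * CUevent newEvent;
+ * cuEventCreate(&newEvent, CU_EVENT_DEFAULT);
+ * cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, newEvent);  // future launches wait on newEvent
+ * \endcode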
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - event wait node from the graph from which graphExec was instantiated + * \param event - Updated event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuGraphEventWaitNodeGetEvent, + * ::cuGraphEventRecordNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecEventWaitNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); + +/** + * \brief Sets the parameters for an external semaphore signal node in the given graphExec + * + * Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore signal node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecExternalSemaphoresSignalNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); + +/** + * \brief Sets the parameters for an external semaphore wait node in the given graphExec + * + * Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. 
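+ *
+ * For illustration, a minimal sketch (assumes \p waitParams is a
+ * ::CUDA_EXT_SEM_WAIT_NODE_PARAMS whose numExtSems matches the value used
+ * when the node was created; error checking is omitted):
+ * \code
+ * // Only the semaphore handles and per-semaphore wait parameters may change here.
+ * cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, &waitParams);
+ * \endcode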
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore wait node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecExternalSemaphoresWaitNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); + +/** + * \brief Enables or disables the specified node in the given graphExec + * + * Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent + * to empty nodes until they are reenabled. Existing node parameters are not affected by + * disabling/enabling the node. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \note Currently only kernel, memset and memcpy nodes are supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Node is enabled if != 0, otherwise the node is disabled + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeGetEnabled, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + * ::cuGraphLaunch + */ +CUresult CUDAAPI cuGraphNodeSetEnabled(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int isEnabled); + +/** + * \brief Query whether a node in the given graphExec is enabled + * + * Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * \note Currently only kernel, memset and memcpy nodes are supported. 
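+ *
+ * For illustration, a minimal sketch that re-enables a node if it was
+ * previously disabled (error checking is omitted):
+ * \code
+ * unsigned int isEnabled;
+ * cuGraphNodeGetEnabled(hGraphExec, hNode, &isEnabled);
+ * if (!isEnabled)
+ *     cuGraphNodeSetEnabled(hGraphExec, hNode, 1);  // behaves as a real node again on future launches
+ * \endcode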
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Location to return the enabled status of the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeSetEnabled, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + * ::cuGraphLaunch + */ +CUresult CUDAAPI cuGraphNodeGetEnabled(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int *isEnabled); + +/** + * \brief Uploads an executable graph in a stream + * + * Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of + * the same \p hGraphExec will be serialized. Each upload is ordered behind both any + * previous work in \p hStream and any previous launches of \p hGraphExec. + * Uses memory cached by \p stream to back the allocations owned by \p hGraphExec. + * + * \param hGraphExec - Executable graph to upload + * \param hStream - Stream in which to upload the graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphLaunch, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphUpload(CUgraphExec hGraphExec, CUstream hStream); + +/** + * \brief Launches an executable graph in a stream + * + * Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing + * at a time. Each launch is ordered behind both any previous work in \p hStream + * and any previous launches of \p hGraphExec. To execute a graph concurrently, it must be + * instantiated multiple times into multiple executable graphs. + * + * If any allocations created by \p hGraphExec remain unfreed (from a previous launch) and + * \p hGraphExec was not instantiated with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, + * the launch will fail with ::CUDA_ERROR_INVALID_VALUE. + * + * \param hGraphExec - Executable graph to launch + * \param hStream - Stream in which to launch the graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphUpload, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraphExec, CUstream hStream); + +/** + * \brief Destroys an executable graph + * + * Destroys the executable graph specified by \p hGraphExec, as well + * as all of its executable nodes. If the executable graph is + * in-flight, it will not be terminated, but rather freed + * asynchronously on completion. + * + * \param hGraphExec - Executable graph to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphUpload, + * ::cuGraphLaunch + */ +CUresult CUDAAPI cuGraphExecDestroy(CUgraphExec hGraphExec); + +/** + * \brief Destroys a graph + * + * Destroys the graph specified by \p hGraph, as well as all of its nodes. 
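+ *
+ * For illustration, a minimal end-of-lifetime sketch (assumes \p hGraph,
+ * \p hGraphExec and \p hStream are valid handles; error checking is omitted):
+ * \code
+ * cuGraphLaunch(hGraphExec, hStream);   // final launch of this executable graph
+ * cuStreamSynchronize(hStream);         // wait so no launch is still in flight
+ * cuGraphExecDestroy(hGraphExec);       // release the executable graph
+ * cuGraphDestroy(hGraph);               // release the graph it was instantiated from
+ * \endcode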
+ *
+ * \param hGraph - Graph to destroy
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \note_graph_thread_safety
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphCreate
+ */
+CUresult CUDAAPI cuGraphDestroy(CUgraph hGraph);
+
+/**
+ * \brief Check whether an executable graph can be updated with a graph and perform the update if possible
+ *
+ * Updates the node parameters in the instantiated graph specified by \p hGraphExec with the
+ * node parameters in a topologically identical graph specified by \p hGraph.
+ *
+ * Limitations:
+ *
+ * - Kernel nodes:
+ *   - The owning context of the function cannot change.
+ *   - A node whose function originally did not use CUDA dynamic parallelism cannot be updated
+ *     to a function which uses CDP.
+ *   - A cooperative node cannot be updated to a non-cooperative node, and vice-versa.
+ *   - If the graph was instantiated with CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, the
+ *     priority attribute cannot change. Equality is checked on the originally requested
+ *     priority values, before they are clamped to the device's supported range.
+ *   - If \p hGraphExec was not instantiated for device launch, a node whose function originally
+ *     did not use device-side cudaGraphLaunch() cannot be updated to a function which uses
+ *     device-side cudaGraphLaunch() unless the node resides on the same context as nodes which
+ *     contained such calls at instantiate-time. If no such calls were present at instantiation,
+ *     these updates cannot be performed at all.
+ * - Memset and memcpy nodes:
+ *   - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change.
+ *   - The source/destination memory must be allocated from the same contexts as the original
+ *     source/destination memory.
+ *   - Only 1D memsets can be changed.
+ * - Additional memcpy node restrictions:
+ *   - Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE,
+ *     CU_MEMORYTYPE_ARRAY, etc.) is not supported.
+ * - External semaphore wait nodes and record nodes:
+ *   - Changing the number of semaphores is not supported.
+ *
+ * Note: The API may add further restrictions in future releases. The return code should always be checked.
+ *
+ * cuGraphExecUpdate sets the result member of \p resultInfo to CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED
+ * under the following conditions:
+ * - The counts of nodes directly in \p hGraphExec and \p hGraph differ, in which case resultInfo->errorNode
+ *   is set to NULL.
+ * - \p hGraph has more exit nodes than \p hGraphExec, in which case resultInfo->errorNode is set to one of
+ *   the exit nodes in hGraph.
+ * - A node in \p hGraph has a different number of dependencies than the node from \p hGraphExec it is paired with,
+ *   in which case resultInfo->errorNode is set to the node from \p hGraph.
+ * - A node in \p hGraph has a dependency that does not match with the corresponding dependency of the paired node
+ *   from \p hGraphExec. resultInfo->errorNode will be set to the node from \p hGraph. resultInfo->errorFromNode
+ *   will be set to the mismatched dependency. The dependencies are paired based on edge order and a dependency
+ *   does not match when the nodes are already paired based on other edges examined in the graph.
+ *
+ * cuGraphExecUpdate sets the result member of \p resultInfo to:
+ * - CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology changed
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node changed, in which case
+ *   resultInfo->errorNode is set to the node from \p hGraph.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE if the function changed in an unsupported
+ *   way (see note above), in which case resultInfo->errorNode is set to the node from \p hGraph
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a node changed in a way
+ *   that is not supported, in which case resultInfo->errorNode is set to the node from \p hGraph.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED if any attributes of a node changed in a way
+ *   that is not supported, in which case resultInfo->errorNode is set to the node from \p hGraph.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is unsupported, like
+ *   the node's type or configuration, in which case resultInfo->errorNode is set to the node from \p hGraph
+ *
+ * If the update fails for a reason not listed above, the result member of \p resultInfo will be set
+ * to CU_GRAPH_EXEC_UPDATE_ERROR. If the update succeeds, the result member will be set to CU_GRAPH_EXEC_UPDATE_SUCCESS.
+ *
+ * cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed successfully. It returns
+ * CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the graph update was not performed because it included
+ * changes which violated constraints specific to instantiated graph update.
+ *
+ * \param hGraphExec The instantiated graph to be updated
+ * \param hGraph The graph containing the updated parameters
+ * \param resultInfo The error info structure
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE,
+ * \note_graph_thread_safety
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphInstantiate
+ */
+CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphExecUpdateResultInfo *resultInfo);
+
+/**
+ * \brief Copies attributes from source node to destination node.
+ *
+ * Copies attributes from source node \p src to destination node \p dst.
+ * Both nodes must have the same context.
+ *
+ * \param[out] dst Destination node
+ * \param[in] src Source node
+ * For list of attributes see ::CUkernelNodeAttrID
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ *
+ * \sa
+ * ::CUaccessPolicyWindow
+ */
+CUresult CUDAAPI cuGraphKernelNodeCopyAttributes(CUgraphNode dst, CUgraphNode src);
+
+/**
+ * \brief Queries node attribute.
+ *
+ * Queries attribute \p attr from node \p hNode and stores it in corresponding
+ * member of \p value_out.
+ *
+ * \param[in] hNode
+ * \param[in] attr
+ * \param[out] value_out
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE
+ * \notefnerr
+ *
+ * \sa
+ * ::CUaccessPolicyWindow
+ */
+CUresult CUDAAPI cuGraphKernelNodeGetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr,
+                                               CUkernelNodeAttrValue *value_out);
+
+/**
+ * \brief Sets node attribute.
+ *
+ * Sets attribute \p attr on node \p hNode from corresponding attribute of
+ * \p value.
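+ *
+ * For illustration, a minimal sketch (assumes the
+ * ::CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE attribute and its \p cooperative
+ * union member; error checking is omitted):
+ * \code
+ * CUkernelNodeAttrValue attrValue;
+ * attrValue.cooperative = 1;   // mark the kernel node as a cooperative launch
+ * cuGraphKernelNodeSetAttribute(hNode, CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE, &attrValue);
+ * \endcode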
+ * + * \param[out] hNode + * \param[in] attr + * \param[out] value + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuGraphKernelNodeSetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr, + const CUkernelNodeAttrValue *value); + +/** + * \brief Write a DOT file describing graph structure + * + * Using the provided \p hGraph, write to \p path a DOT formatted description of the graph. + * By default this includes the graph topology, node types, node id, kernel names and memcpy direction. + * \p flags can be specified to write more detailed information about each node type such as + * parameter values, kernel attributes, node and function handles. + * + * \param hGraph - The graph to create a DOT file from + * \param path - The path to write the DOT file to + * \param flags - Flags from CUgraphDebugDot_flags for specifying which additional node information to write + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OPERATING_SYSTEM + */ +CUresult CUDAAPI cuGraphDebugDotPrint(CUgraph hGraph, const char *path, unsigned int flags); + +/** + * \brief Create a user object + * + * Create a user object with the specified destructor callback and initial reference count. The + * initial references are owned by the caller. + * + * Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they + * are executed by a shared internal thread. Another thread may be signaled to perform such + * actions, if it does not block forward progress of tasks scheduled through CUDA. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object_out - Location to return the user object handle + * \param ptr - The pointer to pass to the destroy function + * \param destroy - Callback to free the user object when it is no longer in use + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. + * \param flags - Currently it is required to pass ::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectRetain, + * ::cuUserObjectRelease, + * ::cuGraphRetainUserObject, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuUserObjectCreate(CUuserObject *object_out, void *ptr, CUhostFn destroy, + unsigned int initialRefcount, unsigned int flags); + +/** + * \brief Retain a reference to a user object + * + * Retains new references to a user object. The new references are owned by the caller. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to retain + * \param count - The number of references to retain, typically 1. Must be nonzero + * and not larger than INT_MAX. 
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRelease, + * ::cuGraphRetainUserObject, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuUserObjectRetain(CUuserObject object, unsigned int count); + +/** + * \brief Release a reference to a user object + * + * Releases user object references owned by the caller. The object's destructor is invoked if + * the reference count reaches zero. + * + * It is undefined behavior to release references not owned by the caller, or to use a user + * object handle after all references are released. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to release + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRetain, + * ::cuGraphRetainUserObject, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuUserObjectRelease(CUuserObject object, unsigned int count); + +/** + * \brief Retain a reference to a user object from a graph + * + * Creates or moves user object references that will be owned by a CUDA graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph to associate the reference with + * \param object - The user object to retain a reference for + * \param count - The number of references to add to the graph, typically 1. Must be + * nonzero and not larger than INT_MAX. + * \param flags - The optional flag ::CU_GRAPH_USER_OBJECT_MOVE transfers references + * from the calling thread, rather than create new references. Pass 0 + * to create new references. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRetain, + * ::cuUserObjectRelease, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuGraphRetainUserObject(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags); + +/** + * \brief Release a user object reference from a graph + * + * Releases user object references owned by a graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph that will release the reference + * \param object - The user object to release a reference for + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRetain, + * ::cuUserObjectRelease, + * ::cuGraphRetainUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuGraphReleaseUserObject(CUgraph graph, CUuserObject object, unsigned int count); + +/** @} */ /* END CUDA_GRAPH */ + +/** + * \defgroup CUDA_OCCUPANCY Occupancy + * + * ___MANBRIEF___ occupancy calculation functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the occupancy calculation functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns occupancy of a function + * + * Returns in \p *numBlocks the number of the maximum active blocks per + * streaming multiprocessor. 
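+ *
+ * For illustration, a minimal sketch that converts the per-SM result into a
+ * device-wide estimate (assumes \p func and \p dev are valid handles; error
+ * checking is omitted):
+ * \code
+ * int numBlocksPerSm = 0, smCount = 0;
+ * cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, func, 256, 0);
+ * cuDeviceGetAttribute(&smCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
+ * int maxActiveBlocks = numBlocksPerSm * smCount;  // rough device-wide upper bound
+ * \endcode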
+ *
+ * \param numBlocks - Returned occupancy
+ * \param func - Kernel for which occupancy is calculated
+ * \param blockSize - Block size the kernel is intended to be launched with
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
+ */
+CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize);
+
+/**
+ * \brief Returns occupancy of a function
+ *
+ * Returns in \p *numBlocks the maximum number of active blocks per
+ * streaming multiprocessor.
+ *
+ * The \p Flags parameter controls how special cases are handled. The
+ * valid flags are:
+ *
+ * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as
+ *   ::cuOccupancyMaxActiveBlocksPerMultiprocessor;
+ *
+ * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the
+ *   default behavior on platforms where global caching affects
+ *   occupancy. On such platforms, if caching is enabled, but
+ *   per-block SM resource usage would result in zero occupancy, the
+ *   occupancy calculator will calculate the occupancy as if caching
+ *   is disabled. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE makes
+ *   the occupancy calculator return 0 in such cases. More information
+ *   about this feature can be found in the "Unified L1/Texture Cache"
+ *   section of the Maxwell tuning guide.
+ *
+ * \param numBlocks - Returned occupancy
+ * \param func - Kernel for which occupancy is calculated
+ * \param blockSize - Block size the kernel is intended to be launched with
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
+ * \param flags - Requested behavior for the occupancy calculator
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
+ */
+CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags);
+
+/**
+ * \brief Suggest a launch configuration with reasonable occupancy
+ *
+ * Returns in \p *blockSize a reasonable block size that can achieve
+ * the maximum occupancy (or, the maximum number of active warps with
+ * the fewest blocks per multiprocessor), and in \p *minGridSize the
+ * minimum grid size to achieve the maximum occupancy.
+ *
+ * If \p blockSizeLimit is 0, the configurator will use the maximum
+ * block size permitted by the device / function instead.
+ *
+ * If per-block dynamic shared memory allocation is not needed, the
+ * user should leave both \p blockSizeToDynamicSMemSize and \p
+ * dynamicSMemSize as 0.
+ *
+ * If per-block dynamic shared memory allocation is needed, then if
+ * the dynamic shared memory size is constant regardless of block
+ * size, the size should be passed through \p dynamicSMemSize, and \p
+ * blockSizeToDynamicSMemSize should be NULL.
+ *
+ * Otherwise, if the per-block dynamic shared memory size varies with
+ * different block sizes, the user needs to provide a unary function
+ * through \p blockSizeToDynamicSMemSize that computes the dynamic
+ * shared memory needed by \p func for any given block size. \p
+ * dynamicSMemSize is ignored. An example signature is:
+ *
+ * \code
+ * // Takes block size, returns dynamic shared memory needed
+ * size_t blockToSmem(int blockSize);
+ * \endcode
+ *
+ * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy
+ * \param blockSize - Returned maximum block size that can achieve the maximum occupancy
+ * \param func - Kernel for which launch configuration is calculated
+ * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size
+ * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes
+ * \param blockSizeLimit - The maximum block size \p func is designed to handle
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cudaOccupancyMaxPotentialBlockSize
+ */
+CUresult CUDAAPI cuOccupancyMaxPotentialBlockSize(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit);
+
+/**
+ * \brief Suggest a launch configuration with reasonable occupancy
+ *
+ * An extended version of ::cuOccupancyMaxPotentialBlockSize. In
+ * addition to arguments passed to ::cuOccupancyMaxPotentialBlockSize,
+ * ::cuOccupancyMaxPotentialBlockSizeWithFlags also takes a \p Flags
+ * parameter.
+ *
+ * The \p Flags parameter controls how special cases are handled. The
+ * valid flags are:
+ *
+ * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as
+ *   ::cuOccupancyMaxPotentialBlockSize;
+ *
+ * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the
+ *   default behavior on platforms where global caching affects
+ *   occupancy. On such platforms, the launch configuration that
+ *   produces maximal occupancy might not support global
+ *   caching. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE
+ *   guarantees that the produced launch configuration is global
+ *   caching compatible at a potential cost of occupancy. More information
+ *   about this feature can be found in the "Unified L1/Texture Cache"
+ *   section of the Maxwell tuning guide.
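+ *
+ * For illustration, a minimal sketch (assumes \p func is a valid kernel
+ * handle, no dynamic shared memory or block size limit is requested, and
+ * error checking is omitted):
+ * \code
+ * int minGridSize = 0, blockSize = 0;
+ * cuOccupancyMaxPotentialBlockSizeWithFlags(&minGridSize, &blockSize, func,
+ *                                           NULL, 0, 0, CU_OCCUPANCY_DEFAULT);
+ * // blockSize can now be used as the launch block size; minGridSize is the
+ * // smallest grid that can still reach the maximum occupancy.
+ * \endcode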
+ * + * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy + * \param blockSize - Returned maximum block size that can achieve the maximum occupancy + * \param func - Kernel for which launch configuration is calculated + * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size + * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to handle + * \param flags - Options + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cudaOccupancyMaxPotentialBlockSizeWithFlags + */ +CUresult CUDAAPI cuOccupancyMaxPotentialBlockSizeWithFlags(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags); + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the blocks + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + */ +CUresult CUDAAPI cuOccupancyAvailableDynamicSMemPerBlock(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize); + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum cluster size in \p *clusterSize. + * + * The cluster dimensions in \p config are ignored. If func has a required + * cluster size set (see ::cudaFuncGetAttributes / ::cuFuncGetAttribute),\p + * *clusterSize will reflect the required cluster size. + * + * By default this function will always return a value that's portable on + * future hardware. A higher value may be returned if the kernel function + * allows non-portable cluster sizes. + * + * This function will respect the compile time launch bounds. + * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cudaFuncGetAttributes, + * ::cuFuncGetAttribute + */ +CUresult CUDAAPI cuOccupancyMaxPotentialClusterSize(int *clusterSize, CUfunction func, const CUlaunchConfig *config); + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. 
+ * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes / ::cuFuncGetAttribute), the cluster size + * from config must either be unspecified or match the required size. + * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_CLUSTER_SIZE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cudaFuncGetAttributes, + * ::cuFuncGetAttribute + */ +CUresult CUDAAPI cuOccupancyMaxActiveClusters(int *numClusters, CUfunction func, const CUlaunchConfig *config); +/** @} */ /* END CUDA_OCCUPANCY */ + +/** + * \defgroup CUDA_TEXREF_DEPRECATED Texture Reference Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated texture reference management functions of the + * low-level CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated texture reference management + * functions of the low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Binds an array as a texture reference + * + * \deprecated + * + * Binds the CUDA array \p hArray to the texture reference \p hTexRef. Any + * previous address or CUDA array state associated with the texture reference + * is superseded by this function. \p Flags must be set to + * ::CU_TRSA_OVERRIDE_FORMAT. Any CUDA array previously bound to \p hTexRef is + * unbound. + * + * \param hTexRef - Texture reference to bind + * \param hArray - Array to bind + * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetArray(CUtexref hTexRef, CUarray hArray, unsigned int Flags); + +/** + * \brief Binds a mipmapped array to a texture reference + * + * \deprecated + * + * Binds the CUDA mipmapped array \p hMipmappedArray to the texture reference \p hTexRef. + * Any previous address or CUDA array state associated with the texture reference + * is superseded by this function. \p Flags must be set to ::CU_TRSA_OVERRIDE_FORMAT. + * Any CUDA array previously bound to \p hTexRef is unbound. 
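+ *
+ * A minimal sketch, assuming \p hTexRef was obtained from ::cuModuleGetTexRef
+ * and \p hMipmappedArray from ::cuMipmappedArrayCreate:
+ * \code
+   // Bind the mipmapped array; CU_TRSA_OVERRIDE_FORMAT is required here.
+   CUresult status = cuTexRefSetMipmappedArray(hTexRef, hMipmappedArray,
+                                               CU_TRSA_OVERRIDE_FORMAT);
+ * \endcode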
+ * + * \param hTexRef - Texture reference to bind + * \param hMipmappedArray - Mipmapped array to bind + * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmappedArray(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags); + +/** + * \brief Binds an address as a texture reference + * + * \deprecated + * + * Binds a linear address range to the texture reference \p hTexRef. Any + * previous address or CUDA array state associated with the texture reference + * is superseded by this function. Any memory previously bound to \p hTexRef + * is unbound. + * + * Since the hardware enforces an alignment requirement on texture base + * addresses, ::cuTexRefSetAddress() passes back a byte offset in + * \p *ByteOffset that must be applied to texture fetches in order to read from + * the desired memory. This offset must be divided by the texel size and + * passed to kernels that read from the texture so they can be applied to the + * ::tex1Dfetch() function. + * + * If the device memory pointer was returned from ::cuMemAlloc(), the offset + * is guaranteed to be 0 and NULL may be passed as the \p ByteOffset parameter. + * + * The total number of elements (or texels) in the linear address range + * cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. + * The number of elements is computed as (\p bytes / bytesPerElement), + * where bytesPerElement is determined from the data format and number of + * components set using ::cuTexRefSetFormat(). + * + * \param ByteOffset - Returned byte offset + * \param hTexRef - Texture reference to bind + * \param dptr - Device pointer to bind + * \param bytes - Size of memory to bind in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr dptr, size_t bytes); + +/** + * \brief Binds an address as a 2D texture reference + * + * \deprecated + * + * Binds a linear address range to the texture reference \p hTexRef. Any + * previous address or CUDA array state associated with the texture reference + * is superseded by this function. Any memory previously bound to \p hTexRef + * is unbound. + * + * Using a ::tex2D() function inside a kernel requires a call to either + * ::cuTexRefSetArray() to bind the corresponding texture reference to an + * array, or ::cuTexRefSetAddress2D() to bind the texture reference to linear + * memory. + * + * Function calls to ::cuTexRefSetFormat() cannot follow calls to + * ::cuTexRefSetAddress2D() for the same texture reference. 
+ * + * It is required that \p dptr be aligned to the appropriate hardware-specific + * texture alignment. You can query this value using the device attribute + * ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. If an unaligned \p dptr is + * supplied, ::CUDA_ERROR_INVALID_VALUE is returned. + * + * \p Pitch has to be aligned to the hardware-specific texture pitch alignment. + * This value can be queried using the device attribute + * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. If an unaligned \p Pitch is + * supplied, ::CUDA_ERROR_INVALID_VALUE is returned. + * + * Width and Height, which are specified in elements (or texels), cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. + * \p Pitch, which is specified in bytes, cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. + * + * \param hTexRef - Texture reference to bind + * \param desc - Descriptor of CUDA array + * \param dptr - Device pointer to bind + * \param Pitch - Line pitch in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch); + +/** + * \brief Sets the format for a texture reference + * + * \deprecated + * + * Specifies the format of the data to be read by the texture reference + * \p hTexRef. \p fmt and \p NumPackedComponents are exactly analogous to the + * ::Format and ::NumChannels members of the ::CUDA_ARRAY_DESCRIPTOR structure: + * They specify the format of each component and the number of components per + * array element. + * + * \param hTexRef - Texture reference + * \param fmt - Format to set + * \param NumPackedComponents - Number of components per array element + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, + * ::cudaCreateChannelDesc + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFormat(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents); + +/** + * \brief Sets the addressing mode for a texture reference + * + * \deprecated + * + * Specifies the addressing mode \p am for the given dimension \p dim of the + * texture reference \p hTexRef. If \p dim is zero, the addressing mode is + * applied to the first parameter of the functions used to fetch from the + * texture; if \p dim is 1, the second, and so on. 
::CUaddress_mode is defined + * as: + * \code + typedef enum CUaddress_mode_enum { + CU_TR_ADDRESS_MODE_WRAP = 0, + CU_TR_ADDRESS_MODE_CLAMP = 1, + CU_TR_ADDRESS_MODE_MIRROR = 2, + CU_TR_ADDRESS_MODE_BORDER = 3 + } CUaddress_mode; + * \endcode + * + * Note that this call has no effect if \p hTexRef is bound to linear memory. + * Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES, is not set, the only + * supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. + * + * \param hTexRef - Texture reference + * \param dim - Dimension + * \param am - Addressing mode to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddressMode(CUtexref hTexRef, int dim, CUaddress_mode am); + +/** + * \brief Sets the filtering mode for a texture reference + * + * \deprecated + * + * Specifies the filtering mode \p fm to be used when reading memory through + * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: + * + * \code + typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, + CU_TR_FILTER_MODE_LINEAR = 1 + } CUfilter_mode; + * \endcode + * + * Note that this call has no effect if \p hTexRef is bound to linear memory. + * + * \param hTexRef - Texture reference + * \param fm - Filtering mode to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFilterMode(CUtexref hTexRef, CUfilter_mode fm); + +/** + * \brief Sets the mipmap filtering mode for a texture reference + * + * \deprecated + * + * Specifies the mipmap filtering mode \p fm to be used when reading memory through + * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: + * + * \code + typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, + CU_TR_FILTER_MODE_LINEAR = 1 + } CUfilter_mode; + * \endcode + * + * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. 
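+ *
+ * A minimal sketch, assuming \p hTexRef is bound to a mipmapped array:
+ * \code
+   // Linear filtering within a level plus linear filtering between levels
+   cuTexRefSetFilterMode(hTexRef, CU_TR_FILTER_MODE_LINEAR);
+   cuTexRefSetMipmapFilterMode(hTexRef, CU_TR_FILTER_MODE_LINEAR);
+ * \endcode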
+ * + * \param hTexRef - Texture reference + * \param fm - Filtering mode to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapFilterMode(CUtexref hTexRef, CUfilter_mode fm); + +/** + * \brief Sets the mipmap level bias for a texture reference + * + * \deprecated + * + * Specifies the mipmap level bias \p bias to be added to the specified mipmap level when + * reading memory through the texture reference \p hTexRef. + * + * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + * + * \param hTexRef - Texture reference + * \param bias - Mipmap level bias + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelBias(CUtexref hTexRef, float bias); + +/** + * \brief Sets the mipmap min/max mipmap level clamps for a texture reference + * + * \deprecated + * + * Specifies the min/max mipmap level clamps, \p minMipmapLevelClamp and \p maxMipmapLevelClamp + * respectively, to be used when reading memory through the texture reference + * \p hTexRef. + * + * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + * + * \param hTexRef - Texture reference + * \param minMipmapLevelClamp - Mipmap min level clamp + * \param maxMipmapLevelClamp - Mipmap max level clamp + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelClamp(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp); + +/** + * \brief Sets the maximum anisotropy for a texture reference + * + * \deprecated + * + * Specifies the maximum anisotropy \p maxAniso to be used when reading memory through + * the texture reference \p hTexRef. + * + * Note that this call has no effect if \p hTexRef is bound to linear memory. 
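+ *
+ * A minimal sketch, assuming \p hTexRef is bound to a CUDA array:
+ * \code
+   // Request 16x anisotropic filtering for this texture reference
+   cuTexRefSetMaxAnisotropy(hTexRef, 16);
+ * \endcode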
+ * + * \param hTexRef - Texture reference + * \param maxAniso - Maximum anisotropy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMaxAnisotropy(CUtexref hTexRef, unsigned int maxAniso); + +/** + * \brief Sets the border color for a texture reference + * + * \deprecated + * + * Specifies the value of the RGBA color via the \p pBorderColor to the texture reference + * \p hTexRef. The color value supports only float type and holds color components in + * the following sequence: + * pBorderColor[0] holds 'R' component + * pBorderColor[1] holds 'G' component + * pBorderColor[2] holds 'B' component + * pBorderColor[3] holds 'A' component + * + * Note that the color values can be set only when the Address mode is set to + * CU_TR_ADDRESS_MODE_BORDER using ::cuTexRefSetAddressMode. + * Applications using integer border color values have to "reinterpret_cast" their values to float. + * + * \param hTexRef - Texture reference + * \param pBorderColor - RGBA color + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddressMode, + * ::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetBorderColor(CUtexref hTexRef, float *pBorderColor); + +/** + * \brief Sets the flags for a texture reference + * + * \deprecated + * + * Specifies optional flags via \p Flags to specify the behavior of data + * returned through the texture reference \p hTexRef. The valid flags are: + * + * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of + * having the texture promote integer data to floating point data in the + * range [0, 1]. Note that texture with 32-bit integer format + * would not be promoted, regardless of whether or not this + * flag is specified; + * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the + * default behavior of having the texture coordinates range + * from [0, Dim) where Dim is the width or height of the CUDA + * array. Instead, the texture coordinates [0, 1.0) reference + * the entire breadth of the array dimension; + * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear + * filtering optimizations. Trilinear optimizations improve texture filtering + * performance by allowing bilinear filtering on textures in scenarios where + * it can closely approximate the expected results. 
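+ *
+ * A minimal sketch combining two of the flags described above, assuming
+ * \p hTexRef is already bound to a CUDA array:
+ * \code
+   cuTexRefSetFlags(hTexRef, CU_TRSF_NORMALIZED_COORDINATES |
+                             CU_TRSF_READ_AS_INTEGER);
+ * \endcode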
+ * + * \param hTexRef - Texture reference + * \param Flags - Optional flags to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFlags(CUtexref hTexRef, unsigned int Flags); + +/** + * \brief Gets the address associated with a texture reference + * + * \deprecated + * + * Returns in \p *pdptr the base address bound to the texture reference + * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + * is not bound to any device memory range. + * + * \param pdptr - Returned device address + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr *pdptr, CUtexref hTexRef); + +/** + * \brief Gets the array bound to a texture reference + * + * \deprecated + * + * Returns in \p *phArray the CUDA array bound to the texture reference + * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + * is not bound to any CUDA array. + * + * \param phArray - Returned array + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetArray(CUarray *phArray, CUtexref hTexRef); + +/** + * \brief Gets the mipmapped array bound to a texture reference + * + * \deprecated + * + * Returns in \p *phMipmappedArray the CUDA mipmapped array bound to the texture + * reference \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + * is not bound to any CUDA mipmapped array. 
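+ *
+ * A minimal sketch, assuming \p hTexRef was obtained from ::cuModuleGetTexRef:
+ * \code
+   CUmipmappedArray hMipmappedArray;
+   if (cuTexRefGetMipmappedArray(&hMipmappedArray, hTexRef) != CUDA_SUCCESS) {
+       // hTexRef is not bound to a CUDA mipmapped array
+   }
+ * \endcode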
+ * + * \param phMipmappedArray - Returned mipmapped array + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmappedArray(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef); + +/** + * \brief Gets the addressing mode used by a texture reference + * + * \deprecated + * + * Returns in \p *pam the addressing mode corresponding to the + * dimension \p dim of the texture reference \p hTexRef. Currently, the only + * valid value for \p dim are 0 and 1. + * + * \param pam - Returned addressing mode + * \param hTexRef - Texture reference + * \param dim - Dimension + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddressMode(CUaddress_mode *pam, CUtexref hTexRef, int dim); + +/** + * \brief Gets the filter-mode used by a texture reference + * + * \deprecated + * + * Returns in \p *pfm the filtering mode of the texture reference + * \p hTexRef. + * + * \param pfm - Returned filtering mode + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFilterMode(CUfilter_mode *pfm, CUtexref hTexRef); + +/** + * \brief Gets the format used by a texture reference + * + * \deprecated + * + * Returns in \p *pFormat and \p *pNumChannels the format and number + * of components of the CUDA array bound to the texture reference \p hTexRef. + * If \p pFormat or \p pNumChannels is NULL, it will be ignored. 
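+ *
+ * A minimal sketch, assuming \p hTexRef is bound to a CUDA array:
+ * \code
+   CUarray_format fmt;
+   int numChannels;
+   cuTexRefGetFormat(&fmt, &numChannels, hTexRef);
+   // Either output pointer may be NULL if that value is not needed:
+   cuTexRefGetFormat(&fmt, NULL, hTexRef);
+ * \endcode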
+ * + * \param pFormat - Returned format + * \param pNumChannels - Returned number of components + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFormat(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef); + +/** + * \brief Gets the mipmap filtering mode for a texture reference + * + * \deprecated + * + * Returns the mipmap filtering mode in \p pfm that's used when reading memory through + * the texture reference \p hTexRef. + * + * \param pfm - Returned mipmap filtering mode + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapFilterMode(CUfilter_mode *pfm, CUtexref hTexRef); + +/** + * \brief Gets the mipmap level bias for a texture reference + * + * \deprecated + * + * Returns the mipmap level bias in \p pBias that's added to the specified mipmap + * level when reading memory through the texture reference \p hTexRef. + * + * \param pbias - Returned mipmap level bias + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelBias(float *pbias, CUtexref hTexRef); + +/** + * \brief Gets the min/max mipmap level clamps for a texture reference + * + * \deprecated + * + * Returns the min/max mipmap level clamps in \p pminMipmapLevelClamp and \p pmaxMipmapLevelClamp + * that's used when reading memory through the texture reference \p hTexRef. 
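+ *
+ * A minimal sketch, assuming \p hTexRef is bound to a mipmapped array:
+ * \code
+   float minClamp = 0.0f, maxClamp = 0.0f;
+   cuTexRefGetMipmapLevelClamp(&minClamp, &maxClamp, hTexRef);
+ * \endcode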
+ * + * \param pminMipmapLevelClamp - Returned mipmap min level clamp + * \param pmaxMipmapLevelClamp - Returned mipmap max level clamp + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelClamp(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef); + +/** + * \brief Gets the maximum anisotropy for a texture reference + * + * \deprecated + * + * Returns the maximum anisotropy in \p pmaxAniso that's used when reading memory through + * the texture reference \p hTexRef. + * + * \param pmaxAniso - Returned maximum anisotropy + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMaxAnisotropy(int *pmaxAniso, CUtexref hTexRef); + +/** + * \brief Gets the border color used by a texture reference + * + * \deprecated + * + * Returns in \p pBorderColor, values of the RGBA color used by + * the texture reference \p hTexRef. + * The color value is of type float and holds color components in + * the following sequence: + * pBorderColor[0] holds 'R' component + * pBorderColor[1] holds 'G' component + * pBorderColor[2] holds 'B' component + * pBorderColor[3] holds 'A' component + * + * \param hTexRef - Texture reference + * \param pBorderColor - Returned Type and Value of RGBA color + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddressMode, + * ::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetBorderColor(float *pBorderColor, CUtexref hTexRef); + +/** + * \brief Gets the flags used by a texture reference + * + * \deprecated + * + * Returns in \p *pFlags the flags of the texture reference \p hTexRef. + * + * \param pFlags - Returned flags + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFlags(unsigned int *pFlags, CUtexref hTexRef); + +/** + * \brief Creates a texture reference + * + * \deprecated + * + * Creates a texture reference and returns its handle in \p *pTexRef. 
Once + * created, the application must call ::cuTexRefSetArray() or + * ::cuTexRefSetAddress() to associate the reference with allocated memory. + * Other texture reference functions are used to specify the format and + * interpretation (addressing, filtering, etc.) to be used when the memory is + * read through this texture reference. + * + * \param pTexRef - Returned texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefDestroy + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefCreate(CUtexref *pTexRef); + +/** + * \brief Destroys a texture reference + * + * \deprecated + * + * Destroys the texture reference specified by \p hTexRef. + * + * \param hTexRef - Texture reference to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefCreate + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefDestroy(CUtexref hTexRef); + +/** @} */ /* END CUDA_TEXREF_DEPRECATED */ + + +/** + * \defgroup CUDA_SURFREF_DEPRECATED Surface Reference Management [DEPRECATED] + * + * ___MANBRIEF___ surface reference management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the surface reference management functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Sets the CUDA array for a surface reference. + * + * \deprecated + * + * Sets the CUDA array \p hArray to be read and written by the surface reference + * \p hSurfRef. Any previous CUDA array state associated with the surface + * reference is superseded by this function. \p Flags must be set to 0. + * The ::CUDA_ARRAY3D_SURFACE_LDST flag must have been set for the CUDA array. + * Any CUDA array previously bound to \p hSurfRef is unbound. + + * \param hSurfRef - Surface reference handle + * \param hArray - CUDA array handle + * \param Flags - set to 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuModuleGetSurfRef, + * ::cuSurfRefGetArray + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefSetArray(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags); + +/** + * \brief Passes back the CUDA array bound to a surface reference. + * + * \deprecated + * + * Returns in \p *phArray the CUDA array bound to the surface reference + * \p hSurfRef, or returns ::CUDA_ERROR_INVALID_VALUE if the surface reference + * is not bound to any CUDA array. 
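+ *
+ * A minimal sketch, assuming \p hSurfRef was obtained from ::cuModuleGetSurfRef:
+ * \code
+   CUarray hBoundArray;
+   if (cuSurfRefGetArray(&hBoundArray, hSurfRef) != CUDA_SUCCESS) {
+       // hSurfRef is not bound to a CUDA array
+   }
+ * \endcode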
+ + * \param phArray - Surface reference handle + * \param hSurfRef - Surface reference handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefGetArray(CUarray *phArray, CUsurfref hSurfRef); + +/** @} */ /* END CUDA_SURFREF_DEPRECATED */ + +/** + * \defgroup CUDA_TEXOBJECT Texture Object Management + * + * ___MANBRIEF___ texture object management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the texture object management functions of the + * low-level CUDA driver application programming interface. The texture + * object API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Creates a texture object + * + * Creates a texture object and returns it in \p pTexObject. \p pResDesc describes + * the data to texture from. \p pTexDesc describes how the data should be sampled. + * \p pResViewDesc is an optional argument that specifies an alternate format for + * the data described by \p pResDesc, and also describes the subresource region + * to restrict access to when texturing. \p pResViewDesc can only be specified if + * the type of resource is a CUDA array or a CUDA mipmapped array. + * + * Texture objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a texture object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * The ::CUDA_RESOURCE_DESC structure is defined as: + * \code + typedef struct CUDA_RESOURCE_DESC_st + { + CUresourcetype resType; + + union { + struct { + CUarray hArray; + } array; + struct { + CUmipmappedArray hMipmappedArray; + } mipmap; + struct { + CUdeviceptr devPtr; + CUarray_format format; + unsigned int numChannels; + size_t sizeInBytes; + } linear; + struct { + CUdeviceptr devPtr; + CUarray_format format; + unsigned int numChannels; + size_t width; + size_t height; + size_t pitchInBytes; + } pitch2D; + } res; + + unsigned int flags; + } CUDA_RESOURCE_DESC; + + * \endcode + * where: + * - ::CUDA_RESOURCE_DESC::resType specifies the type of resource to texture from. + * CUresourceType is defined as: + * \code + typedef enum CUresourcetype_enum { + CU_RESOURCE_TYPE_ARRAY = 0x00, + CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, + CU_RESOURCE_TYPE_LINEAR = 0x02, + CU_RESOURCE_TYPE_PITCH2D = 0x03 + } CUresourcetype; + * \endcode + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_ARRAY, ::CUDA_RESOURCE_DESC::res::array::hArray + * must be set to a valid CUDA array handle. + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, ::CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray + * must be set to a valid CUDA mipmapped array handle. + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_LINEAR, ::CUDA_RESOURCE_DESC::res::linear::devPtr + * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. + * ::CUDA_RESOURCE_DESC::res::linear::format and ::CUDA_RESOURCE_DESC::res::linear::numChannels + * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::linear::sizeInBytes + * specifies the size of the array in bytes. 
The total number of elements in the linear address range cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)). + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_PITCH2D, ::CUDA_RESOURCE_DESC::res::pitch2D::devPtr + * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. + * ::CUDA_RESOURCE_DESC::res::pitch2D::format and ::CUDA_RESOURCE_DESC::res::pitch2D::numChannels + * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::pitch2D::width + * and ::CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. + * ::CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to + * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. + * + * - ::flags must be set to zero. + * + * + * The ::CUDA_TEXTURE_DESC struct is defined as + * \code + typedef struct CUDA_TEXTURE_DESC_st { + CUaddress_mode addressMode[3]; + CUfilter_mode filterMode; + unsigned int flags; + unsigned int maxAnisotropy; + CUfilter_mode mipmapFilterMode; + float mipmapLevelBias; + float minMipmapLevelClamp; + float maxMipmapLevelClamp; + } CUDA_TEXTURE_DESC; + * \endcode + * where + * - ::CUDA_TEXTURE_DESC::addressMode specifies the addressing mode for each dimension of the texture data. ::CUaddress_mode is defined as: + * \code + typedef enum CUaddress_mode_enum { + CU_TR_ADDRESS_MODE_WRAP = 0, + CU_TR_ADDRESS_MODE_CLAMP = 1, + CU_TR_ADDRESS_MODE_MIRROR = 2, + CU_TR_ADDRESS_MODE_BORDER = 3 + } CUaddress_mode; + * \endcode + * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES + * is not set, the only supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. + * + * - ::CUDA_TEXTURE_DESC::filterMode specifies the filtering mode to be used when fetching from the texture. CUfilter_mode is defined as: + * \code + typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, + CU_TR_FILTER_MODE_LINEAR = 1 + } CUfilter_mode; + * \endcode + * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. + * + * - ::CUDA_TEXTURE_DESC::flags can be any combination of the following: + * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of + * having the texture promote integer data to floating point data in the + * range [0, 1]. Note that texture with 32-bit integer format would not be + * promoted, regardless of whether or not this flag is specified. + * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior + * of having the texture coordinates range from [0, Dim) where Dim is the + * width or height of the CUDA array. Instead, the texture coordinates + * [0, 1.0) reference the entire breadth of the array dimension; Note that + * for CUDA mipmapped arrays, this flag has to be set. + * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear + * filtering optimizations. Trilinear optimizations improve texture filtering + * performance by allowing bilinear filtering on textures in scenarios where + * it can closely approximate the expected results. 
+ * - ::CU_TRSF_SEAMLESS_CUBEMAP, which enables seamless cube map filtering. + * This flag can only be specified if the underlying resource is a CUDA array + * or a CUDA mipmapped array that was created with the flag ::CUDA_ARRAY3D_CUBEMAP. + * When seamless cube map filtering is enabled, texture address modes specified + * by ::CUDA_TEXTURE_DESC::addressMode are ignored. Instead, if the ::CUDA_TEXTURE_DESC::filterMode + * is set to ::CU_TR_FILTER_MODE_POINT the address mode ::CU_TR_ADDRESS_MODE_CLAMP + * will be applied for all dimensions. If the ::CUDA_TEXTURE_DESC::filterMode is + * set to ::CU_TR_FILTER_MODE_LINEAR seamless cube map filtering will be performed + * when sampling along the cube face borders. + * + * - ::CUDA_TEXTURE_DESC::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be + * clamped to the range [1,16]. + * + * - ::CUDA_TEXTURE_DESC::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels. + * + * - ::CUDA_TEXTURE_DESC::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level. + * + * - ::CUDA_TEXTURE_DESC::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to. + * + * - ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to. + * + * + * The ::CUDA_RESOURCE_VIEW_DESC struct is defined as + * \code + typedef struct CUDA_RESOURCE_VIEW_DESC_st + { + CUresourceViewFormat format; + size_t width; + size_t height; + size_t depth; + unsigned int firstMipmapLevel; + unsigned int lastMipmapLevel; + unsigned int firstLayer; + unsigned int lastLayer; + } CUDA_RESOURCE_VIEW_DESC; + * \endcode + * where: + * - ::CUDA_RESOURCE_VIEW_DESC::format specifies how the data contained in the CUDA array or CUDA mipmapped array should + * be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block + * compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base of format ::CU_AD_FORMAT_UNSIGNED_INT32. + * with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have + * a format of ::CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base + * format but with 4 channels. + * + * - ::CUDA_RESOURCE_VIEW_DESC::width specifies the new width of the texture data. If the resource view format is a block + * compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, + * this value has to be equal to that of the original resource. + * + * - ::CUDA_RESOURCE_VIEW_DESC::height specifies the new height of the texture data. If the resource view format is a block + * compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, + * this value has to be equal to that of the original resource. + * + * - ::CUDA_RESOURCE_VIEW_DESC::depth specifies the new depth of the texture data. This value has to be equal to that of the + * original resource. + * + * - ::CUDA_RESOURCE_VIEW_DESC::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. 
+ * For non-mipmapped resources, this value has to be zero.::CUDA_TEXTURE_DESC::minMipmapLevelClamp and ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp + * will be relative to this value. For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, + * then the actual minimum mipmap level clamp will be 3.2. + * + * - ::CUDA_RESOURCE_VIEW_DESC::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value + * has to be zero. + * + * - ::CUDA_RESOURCE_VIEW_DESC::firstLayer specifies the first layer index for layered textures. This will be the new layer zero. + * For non-layered resources, this value has to be zero. + * + * - ::CUDA_RESOURCE_VIEW_DESC::lastLayer specifies the last layer index for layered textures. For non-layered resources, + * this value has to be zero. + * + * + * \param pTexObject - Texture object to create + * \param pResDesc - Resource descriptor + * \param pTexDesc - Texture descriptor + * \param pResViewDesc - Resource view descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectDestroy, + * ::cudaCreateTextureObject + */ +CUresult CUDAAPI cuTexObjectCreate(CUtexObject *pTexObject, const CUDA_RESOURCE_DESC *pResDesc, const CUDA_TEXTURE_DESC *pTexDesc, const CUDA_RESOURCE_VIEW_DESC *pResViewDesc); + +/** + * \brief Destroys a texture object + * + * Destroys the texture object specified by \p texObject. + * + * \param texObject - Texture object to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaDestroyTextureObject + */ +CUresult CUDAAPI cuTexObjectDestroy(CUtexObject texObject); + +/** + * \brief Returns a texture object's resource descriptor + * + * Returns the resource descriptor for the texture object specified by \p texObject. + * + * \param pResDesc - Resource descriptor + * \param texObject - Texture object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaGetTextureObjectResourceDesc, + */ +CUresult CUDAAPI cuTexObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUtexObject texObject); + +/** + * \brief Returns a texture object's texture descriptor + * + * Returns the texture descriptor for the texture object specified by \p texObject. + * + * \param pTexDesc - Texture descriptor + * \param texObject - Texture object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaGetTextureObjectTextureDesc + */ +CUresult CUDAAPI cuTexObjectGetTextureDesc(CUDA_TEXTURE_DESC *pTexDesc, CUtexObject texObject); + +/** + * \brief Returns a texture object's resource view descriptor + * + * Returns the resource view descriptor for the texture object specified by \p texObject. + * If no resource view was set for \p texObject, the ::CUDA_ERROR_INVALID_VALUE is returned. 
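+ *
+ * A minimal sketch, assuming \p texObject was created with ::cuTexObjectCreate:
+ * \code
+   CUDA_RESOURCE_VIEW_DESC viewDesc;
+   if (cuTexObjectGetResourceViewDesc(&viewDesc, texObject) == CUDA_ERROR_INVALID_VALUE) {
+       // texObject was created without a resource view descriptor
+   }
+ * \endcode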
+ * + * \param pResViewDesc - Resource view descriptor + * \param texObject - Texture object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaGetTextureObjectResourceViewDesc + */ +CUresult CUDAAPI cuTexObjectGetResourceViewDesc(CUDA_RESOURCE_VIEW_DESC *pResViewDesc, CUtexObject texObject); + +/** @} */ /* END CUDA_TEXOBJECT */ + +/** + * \defgroup CUDA_SURFOBJECT Surface Object Management + * + * ___MANBRIEF___ surface object management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the surface object management functions of the + * low-level CUDA driver application programming interface. The surface + * object API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Creates a surface object + * + * Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes + * the data to perform surface load/stores on. ::CUDA_RESOURCE_DESC::resType must be + * ::CU_RESOURCE_TYPE_ARRAY and ::CUDA_RESOURCE_DESC::res::array::hArray + * must be set to a valid CUDA array handle. ::CUDA_RESOURCE_DESC::flags must be set to zero. + * + * Surface objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a surface object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * \param pSurfObject - Surface object to create + * \param pResDesc - Resource descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuSurfObjectDestroy, + * ::cudaCreateSurfaceObject + */ +CUresult CUDAAPI cuSurfObjectCreate(CUsurfObject *pSurfObject, const CUDA_RESOURCE_DESC *pResDesc); + +/** + * \brief Destroys a surface object + * + * Destroys the surface object specified by \p surfObject. + * + * \param surfObject - Surface object to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuSurfObjectCreate, + * ::cudaDestroySurfaceObject + */ +CUresult CUDAAPI cuSurfObjectDestroy(CUsurfObject surfObject); + +/** + * \brief Returns a surface object's resource descriptor + * + * Returns the resource descriptor for the surface object specified by \p surfObject. + * + * \param pResDesc - Resource descriptor + * \param surfObject - Surface object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuSurfObjectCreate, + * ::cudaGetSurfaceObjectResourceDesc + */ +CUresult CUDAAPI cuSurfObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUsurfObject surfObject); + +/** @} */ /* END CUDA_SURFOBJECT */ + +/** + * \defgroup CUDA_TENSOR_MEMORY Tensor Map Object Managment + * + * ___MANBRIEF___ tensor map object management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the tensor map object management functions of the + * low-level CUDA driver application programming interface. The tensor + * core API is only supported on devices of compute capability 9.0 or higher. 
+ * + * @{ + */ + +/** + * \brief Create a tensor map descriptor object representing tiled memory region + * + * Creates a descriptor for Tensor Memory Access (TMA) object specified + * by the parameters describing a tiled region and returns it in \p tensorMap. + * + * Tensor map objects are only supported on devices of compute capability 9.0 or higher. + * Additionally, a tensor map object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * The parameters passed are bound to the following requirements: + * + * - \p tensorMap address must be aligned to 64 bytes. + * + * - \p tensorDataType has to be an enum from ::CUtensorMapDataType which is defined as: + * \code + typedef enum CUtensorMapDataType_enum { + CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, // 1 byte + CU_TENSOR_MAP_DATA_TYPE_UINT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_INT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_INT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ // 4 bytes + } CUtensorMapDataType; + * \endcode + * + * - \p tensorRank must be non-zero and less than or equal to the maximum supported dimensionality of 5. If \p interleave is not + * ::CU_TENSOR_MAP_INTERLEAVE_NONE, then \p tensorRank must additionally be greater than or equal to 3. + * + * - \p globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when \p interleave is + * ::CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise. + * + * - \p globalDim array, which specifies tensor size of each of the \p tensorRank dimensions, must be non-zero and less than or + * equal to 2^32. + * + * - \p globalStrides array, which specifies tensor stride of each of the lower \p tensorRank - 1 dimensions in bytes, must be a + * multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B. + * Each following dimension specified includes previous dimension stride: + * \code + globalStrides[0] = globalDim[0] * elementSizeInBytes(tensorDataType) + padding[0]; + for (i = 1; i < tensorRank - 1; i++) + globalStrides[i] = globalStrides[i – 1] * (globalDim[i] + padding[i]); + assert(globalStrides[i] >= globalDim[i]); + * \endcode + * + * - \p boxDim array, which specifies number of elements to be traversed along each of the \p tensorRank dimensions, must be non-zero + * and less than or equal to 256. + * When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, { \p boxDim[0] * elementSizeInBytes( \p tensorDataType ) } must be a multiple + * of 16 bytes. + * + * - \p elementStrides array, which specifies the iteration step along each of the \p tensorRank dimensions, must be non-zero and less + * than or equal to 8. Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since + * TMA doesn’t support the stride for dimension zero. + * When all elemets of \p elementStrides array is one, \p boxDim specifies the number of elements to load. However, if the \p elementStrides[i] + * is not equal to one, then TMA loads ceil( \p boxDim[i] / \p elementStrides[i]) number of elements along i-th dimension. 
To load N elements along
+ * i-th dimension, \p boxDim[i] must be set to N * \p elementStrides[i].
+ *
+ * - \p interleave specifies the interleaved layout of type ::CUtensorMapInterleave, which is defined as:
+ * \code
+   typedef enum CUtensorMapInterleave_enum {
+       CU_TENSOR_MAP_INTERLEAVE_NONE = 0,
+       CU_TENSOR_MAP_INTERLEAVE_16B,
+       CU_TENSOR_MAP_INTERLEAVE_32B
+   } CUtensorMapInterleave;
+ * \endcode
+ * TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 bytes per channel or NC/16HWC16 where C16
+ * uses 32 bytes.
+ * When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE and \p swizzle is not ::CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension
+ * (computed as \p boxDim[0] multiplied by element size derived from \p tensorDataType) must be less than or equal to the swizzle size.
+ *  - CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32.
+ *  - CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64.
+ *  - CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128.
+ *
+ * - \p swizzle, which specifies the shared memory bank swizzling pattern, has to be of type ::CUtensorMapSwizzle which is defined as:
+ * \code
+   typedef enum CUtensorMapSwizzle_enum {
+       CU_TENSOR_MAP_SWIZZLE_NONE = 0,
+       CU_TENSOR_MAP_SWIZZLE_32B,
+       CU_TENSOR_MAP_SWIZZLE_64B,
+       CU_TENSOR_MAP_SWIZZLE_128B
+   } CUtensorMapSwizzle;
+ * \endcode
+ * Data is organized in a specific order in global memory; however, this may not match the order in which the application accesses data in
+ * shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this
+ * problem, data can be loaded to shared memory with shuffling across shared memory banks.
+ * Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B, \p swizzle is expected to be ::CU_TENSOR_MAP_SWIZZLE_32B.
+ * Other interleave modes can have any swizzling pattern.
+ *
+ * - \p l2Promotion specifies the L2 fetch size, which indicates the byte granularity at which L2 requests are filled from DRAM. It must be of
+ * type ::CUtensorMapL2promotion, which is defined as:
+ * \code
+   typedef enum CUtensorMapL2promotion_enum {
+       CU_TENSOR_MAP_L2_PROMOTION_NONE = 0,
+       CU_TENSOR_MAP_L2_PROMOTION_L2_64B,
+       CU_TENSOR_MAP_L2_PROMOTION_L2_128B,
+       CU_TENSOR_MAP_L2_PROMOTION_L2_256B
+   } CUtensorMapL2promotion;
+ * \endcode
+ *
+ * - \p oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type
+ * ::CUtensorMapFloatOOBfill which is defined as:
+ * \code
+   typedef enum CUtensorMapFloatOOBfill_enum {
+       CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0,
+       CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
+   } CUtensorMapFloatOOBfill;
+ * \endcode
+ * Note that ::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when \p tensorDataType represents a floating-point data type.
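+ *
+ * A minimal sketch for a 2D, row-major float tensor; \p dTensor is assumed to
+ * be a \e void* holding a device address obtained from ::cuMemAlloc, and all
+ * sizes below are illustrative:
+ * \code
+   CUtensorMap tensorMap;                                  // address must be 64-byte aligned
+   cuuint64_t  globalDim[2]      = {1024, 1024};           // elements per dimension
+   cuuint64_t  globalStrides[1]  = {1024 * sizeof(float)}; // row stride in bytes, multiple of 16
+   cuuint32_t  boxDim[2]         = {64, 64};               // traverse in 64x64 element tiles
+   cuuint32_t  elementStrides[2] = {1, 1};
+   cuTensorMapEncodeTiled(&tensorMap, CU_TENSOR_MAP_DATA_TYPE_FLOAT32, 2,
+                          dTensor, globalDim, globalStrides, boxDim,
+                          elementStrides, CU_TENSOR_MAP_INTERLEAVE_NONE,
+                          CU_TENSOR_MAP_SWIZZLE_NONE, CU_TENSOR_MAP_L2_PROMOTION_NONE,
+                          CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
+ * \endcode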
+ * + * \param tensorMap - Tensor map object to create + * \param tensorDataType - Tensor data type + * \param tensorRank - Dimensionality of tensor + * \param globalAddress - Starting address of memory region described by tensor + * \param globalDim - Array containing tensor size (number of elements) along each of the \p tensorRank dimensions + * \param globalStrides - Array containing stride size (in bytes) along each of the \p tensorRank - 1 dimensions + * \param boxDim - Array containing traversal box size (number of elments) along each of the \p tensorRank dimensions. Specifies how many elements to be traversed along each tensor dimension. + * \param elementStrides - Array containing traversal stride in each of the \p tensorRank dimensions + * \param interleave - Type of interleaved layout the tensor addresses + * \param swizzle - Bank swizzling pattern inside shared memory + * \param l2Promotion - L2 promotion size + * \param oobFill - Indicate whether zero or special NaN constant must be used to fill out-of-bound elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTensorMapEncodeIm2col, + * ::cuTensorMapReplaceAddress + */ +CUresult CUDAAPI cuTensorMapEncodeTiled(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const cuuint32_t *boxDim, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); + + +/** + * \brief Create a tensor map descriptor object representing im2col memory region + * + * Creates a descriptor for Tensor Memory Access (TMA) object specified + * by the parameters describing a im2col memory layout and returns it in \p tensorMap. + * + * Tensor map objects are only supported on devices of compute capability 9.0 or higher. + * Additionally, a tensor map object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * The parameters passed are bound to the following requirements: + * + * - \p tensorMap address must be aligned to 64 bytes. + * + * - \p tensorDataType has to be an enum from ::CUtensorMapDataType which is defined as: + * \code + typedef enum CUtensorMapDataType_enum { + CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, // 1 byte + CU_TENSOR_MAP_DATA_TYPE_UINT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_INT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_INT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ // 4 bytes + } CUtensorMapDataType; + * \endcode + * + * - \p tensorRank must be one of dimensions 3, 4, or 5. + * + * - \p globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when \p interleave is + * ::CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise. + * + * - \p globalDim array, which specifies tensor size of each of the \p tensorRank dimensions, must be non-zero and less than or + * equal to 2^32. 
+ * + * - \p globalStrides array, which specifies tensor stride of each of the lower \p tensorRank - 1 dimensions in bytes, must be a + * multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B. + * Each following dimension specified includes previous dimension stride: + * \code + globalStrides[0] = globalDim[0] * elementSizeInBytes(tensorDataType) + padding[0]; + for (i = 1; i < tensorRank - 1; i++) + globalStrides[i] = globalStrides[i – 1] * (globalDim[i] + padding[i]); + assert(globalStrides[i] >= globalDim[i]); + * \endcode + * + * - \p pixelBoxLowerCorner array specifies the coordinate offsets {D, H, W} of the bounding box from top/left/front corner. The number of + * offsets and their precision depends on the tensor dimensionality: + * - When \p tensorRank is 3, one signed offset within range [-32768, 32767] is supported. + * - When \p tensorRank is 4, two signed offsets each within range [-128, 127] are supported. + * - When \p tensorRank is 5, three offsets each within range [-16, 15] are supported. + * + * - \p pixelBoxUpperCorner array specifies the coordinate offsets {D, H, W} of the bounding box from bottom/right/back corner. The number of + * offsets and their precision depends on the tensor dimensionality: + * - When \p tensorRank is 3, one signed offset within range [-32768, 32767] is supported. + * - When \p tensorRank is 4, two signed offsets each within range [-128, 127] are supported. + * - When \p tensorRank is 5, three offsets each within range [-16, 15] are supported. + * The bounding box specified by \p pixelBoxLowerCorner and \p pixelBoxUpperCorner must have non-zero area. + * + * - \p channelsPerPixel, which specifies the number of elements which must be accessed along C dimension, must be less than or equal to 256. + * + * - \p pixelsPerColumn, which specifies the number of elements that must be accessed along the {N, D, H, W} dimensions, must be less than or + * equal to 1024. + * + * - \p elementStrides array, which specifies the iteration step along each of the \p tensorRank dimensions, must be non-zero and less + * than or equal to 8. Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since + * TMA doesn’t support the stride for dimension zero. + * When all elemets of \p elementStrides array is one, \p boxDim specifies the number of elements to load. However, if the \p elementStrides[i] + * is not equal to one, then TMA loads ceil( \p boxDim[i] / \p elementStrides[i]) number of elements along i-th dimension. To load N elements along + * i-th dimension, \p boxDim[i] must be set to N * \p elementStrides[i]. + * + * - \p interleave specifies the interleaved layout of type ::CUtensorMapInterleave, which is defined as: + * \code + typedef enum CUtensorMapInterleave_enum { + CU_TENSOR_MAP_INTERLEAVE_NONE = 0, + CU_TENSOR_MAP_INTERLEAVE_16B, + CU_TENSOR_MAP_INTERLEAVE_32B + } CUtensorMapInterleave; + * \endcode + * TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 byte per channel or NC/16HWC16 where C16 + * uses 32 bytes. + * When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE and \p swizzle is not ::CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension + * (computed as \p boxDim[0] multiplied by element size derived from \p tensorDataType) must be less than or equal to the swizzle size. + * - CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32. 
+ * - CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64.
+ * - CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128.
+ *
+ * - \p swizzle, which specifies the shared memory bank swizzling pattern, has to be of type ::CUtensorMapSwizzle which is defined as:
+ * \code
+    typedef enum CUtensorMapSwizzle_enum {
+        CU_TENSOR_MAP_SWIZZLE_NONE = 0,
+        CU_TENSOR_MAP_SWIZZLE_32B,
+        CU_TENSOR_MAP_SWIZZLE_64B,
+        CU_TENSOR_MAP_SWIZZLE_128B
+    } CUtensorMapSwizzle;
+ * \endcode
+ * Data is organized in a specific order in global memory; however, it may not match the order in which the application accesses data in
+ * shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this
+ * problem, data can be loaded to shared memory with shuffling across shared memory banks.
+ * Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B, \p swizzle is expected to be ::CU_TENSOR_MAP_SWIZZLE_32B.
+ * Other interleave modes can have any swizzling pattern.
+ *
+ * - \p l2Promotion specifies the L2 fetch size, which indicates the byte granularity at which L2 requests are filled from DRAM. It must be of
+ * type ::CUtensorMapL2promotion, which is defined as:
+ * \code
+    typedef enum CUtensorMapL2promotion_enum {
+        CU_TENSOR_MAP_L2_PROMOTION_NONE = 0,
+        CU_TENSOR_MAP_L2_PROMOTION_L2_64B,
+        CU_TENSOR_MAP_L2_PROMOTION_L2_128B,
+        CU_TENSOR_MAP_L2_PROMOTION_L2_256B
+    } CUtensorMapL2promotion;
+ * \endcode
+ *
+ * - \p oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type
+ * ::CUtensorMapFloatOOBfill which is defined as:
+ * \code
+    typedef enum CUtensorMapFloatOOBfill_enum {
+        CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0,
+        CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA
+    } CUtensorMapFloatOOBfill;
+ * \endcode
+ * Note that ::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when \p tensorDataType represents a floating-point data type.
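+ *
+ * The following is an illustrative sketch only (not part of the original documentation) for an NHWC FP16
+ * tensor of shape N=16, H=56, W=56, C=64, with dimension 0 holding the channels. The corner offsets,
+ * channelsPerPixel and pixelsPerColumn values are placeholders chosen only to satisfy the ranges listed
+ * above, and d_ptr stands for a suitably aligned device allocation:
+ * \code
+    CUtensorMap map;
+    cuuint64_t globalDim[4]      = { 64, 56, 56, 16 };                        // {C, W, H, N}
+    cuuint64_t globalStrides[3]  = { 64 * 2, 64 * 2 * 56, 64 * 2 * 56 * 56 }; // byte strides, multiples of 16
+    int        lowerCorner[2]    = { 0, 0 };                                  // no spatial halo in H and W
+    int        upperCorner[2]    = { 0, 0 };
+    cuuint32_t elementStrides[4] = { 1, 1, 1, 1 };
+
+    CUresult status = cuTensorMapEncodeIm2col(&map, CU_TENSOR_MAP_DATA_TYPE_FLOAT16, 4, d_ptr,
+                                              globalDim, globalStrides, lowerCorner, upperCorner,
+                                              64 /* channelsPerPixel */, 128 /* pixelsPerColumn */,
+                                              elementStrides, CU_TENSOR_MAP_INTERLEAVE_NONE,
+                                              CU_TENSOR_MAP_SWIZZLE_NONE, CU_TENSOR_MAP_L2_PROMOTION_NONE,
+                                              CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
+ * \endcode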
+ * + * \param tensorMap - Tensor map object to create + * \param tensorDataType - Tensor data type + * \param tensorRank - Dimensionality of tensor, needs to be at least of dimension 3 + * \param globalAddress - Starting address of memory region described by tensor + * \param globalDim - Array containing tensor size (number of elements) along each of the \p tensorRank dimensions + * \param globalStrides - Array containing stride size (in bytes) along each of the \p tensorRank - 1 dimensions + * \param pixelBoxLowerCorner - Array containing DHW dimentions of lower box corner + * \param pixelBoxUpperCorner - Array containing DHW dimentions of upper box corner + * \param channelsPerPixel - Number of channels per pixel + * \param pixelsPerColumn - Number of pixels per column + * \param elementStrides - Array containing traversal stride in each of the \p tensorRank dimensions + * \param interleave - Type of interleaved layout the tensor addresses + * \param swizzle - Bank swizzling pattern inside shared memory + * \param l2Promotion - L2 promotion size + * \param oobFill - Indicate whether zero or special NaN constant must be used to fill out-of-bound elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTensorMapEncodeTiled, + * ::cuTensorMapReplaceAddress + */ +CUresult CUDAAPI cuTensorMapEncodeIm2col(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const int *pixelBoxLowerCorner, const int *pixelBoxUpperCorner, cuuint32_t channelsPerPixel, cuuint32_t pixelsPerColumn, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); + +/** + * \brief Modify an existing tensor map descriptor with an updated global address + * + * Modifies the descriptor for Tensor Memory Access (TMA) object passed in \p tensorMap with + * an updated \p globalAddress. + * + * Tensor map objects are only supported on devices of compute capability 9.0 or higher. + * Additionally, a tensor map object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * \param tensorMap - Tensor map object to modify + * \param globalAddress - Starting address of memory region described by tensor, must follow previous alignment requirements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTensorMapEncodeTiled, + * ::cuTensorMapEncodeIm2col + */ +CUresult CUDAAPI cuTensorMapReplaceAddress(CUtensorMap *tensorMap, void *globalAddress); + +/** @} */ +/* END CUDA_TENSOR_MEMORY */ + +/** + * \defgroup CUDA_PEER_ACCESS Peer Context Memory Access + * + * ___MANBRIEF___ direct peer context memory access functions of the low-level + * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the direct peer context memory access functions + * of the low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Queries if a device may directly access a peer device's memory. + * + * Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of + * directly accessing memory from contexts on \p peerDev and 0 otherwise. 
+ * If direct access of \p peerDev from \p dev is possible, then access may be + * enabled on two specific contexts by calling ::cuCtxEnablePeerAccess(). + * + * \param canAccessPeer - Returned access capability + * \param dev - Device from which allocations on \p peerDev are to + * be directly accessed. + * \param peerDev - Device on which the allocations to be directly accessed + * by \p dev reside. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuCtxEnablePeerAccess, + * ::cuCtxDisablePeerAccess, + * ::cudaDeviceCanAccessPeer + */ +CUresult CUDAAPI cuDeviceCanAccessPeer(int *canAccessPeer, CUdevice dev, CUdevice peerDev); + +/** + * \brief Enables direct access to memory allocations in a peer context. + * + * If both the current context and \p peerContext are on devices which support unified + * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and same + * major compute capability, then on success all allocations from \p peerContext will + * immediately be accessible by the current context. See \ref CUDA_UNIFIED for additional + * details. + * + * Note that access granted by this call is unidirectional and that in order to access + * memory from the current context in \p peerContext, a separate symmetric call + * to ::cuCtxEnablePeerAccess() is required. + * + * Note that there are both device-wide and system-wide limitations per system + * configuration, as noted in the CUDA Programming Guide under the section + * "Peer-to-Peer Memory Access". + * + * Returns ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if ::cuDeviceCanAccessPeer() indicates + * that the ::CUdevice of the current context cannot directly access memory + * from the ::CUdevice of \p peerContext. + * + * Returns ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of + * \p peerContext from the current context has already been enabled. + * + * Returns ::CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible + * because hardware resources required for peer access have been exhausted. + * + * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, \p peerContext + * is not a valid context, or if the current context is \p peerContext. + * + * Returns ::CUDA_ERROR_INVALID_VALUE if \p Flags is not 0. + * + * \param peerContext - Peer context to enable direct access to from the current context + * \param Flags - Reserved for future use and must be set to 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, + * ::CUDA_ERROR_TOO_MANY_PEERS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuDeviceCanAccessPeer, + * ::cuCtxDisablePeerAccess, + * ::cudaDeviceEnablePeerAccess + */ +CUresult CUDAAPI cuCtxEnablePeerAccess(CUcontext peerContext, unsigned int Flags); + +/** + * \brief Disables direct access to memory allocations in a peer context and + * unregisters any registered allocations. + * + Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has + * not yet been enabled from \p peerContext to the current context. + * + * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if + * \p peerContext is not a valid context. 
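+ *
+ * As an illustrative sketch (not part of the original documentation), a typical query / enable / work /
+ * disable sequence between two devices might look as follows; ctx0 and ctx1 are assumed to be existing
+ * contexts on dev0 and dev1 respectively:
+ * \code
+    int canAccess = 0;
+    cuDeviceCanAccessPeer(&canAccess, dev0, dev1);
+    if (canAccess) {
+        cuCtxSetCurrent(ctx0);              // make the context on dev0 current
+        cuCtxEnablePeerAccess(ctx1, 0);     // allocations in ctx1 become visible to ctx0 (one direction only)
+        /* ... launch work in ctx0 that reads memory allocated in ctx1 ... */
+        cuCtxDisablePeerAccess(ctx1);       // tear down the mapping and unregister peer allocations
+    }
+ * \endcode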
+ * + * \param peerContext - Peer context to disable direct access to + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * \notefnerr + * + * \sa + * ::cuDeviceCanAccessPeer, + * ::cuCtxEnablePeerAccess, + * ::cudaDeviceDisablePeerAccess + */ +CUresult CUDAAPI cuCtxDisablePeerAccess(CUcontext peerContext); + +/** + * \brief Queries attributes of the link between two devices. + * + * Returns in \p *value the value of the requested attribute \p attrib of the + * link between \p srcDevice and \p dstDevice. The supported attributes are: + * - ::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: A relative value indicating the + * performance of the link between two devices. + * - ::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED P2P: 1 if P2P Access is enable. + * - ::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: 1 if Atomic operations over + * the link are supported. + * - ::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: 1 if cudaArray can + * be accessed over the link. + * + * Returns ::CUDA_ERROR_INVALID_DEVICE if \p srcDevice or \p dstDevice are not valid + * or if they represent the same device. + * + * Returns ::CUDA_ERROR_INVALID_VALUE if \p attrib is not valid or if \p value is + * a null pointer. + * + * \param value - Returned value of the requested attribute + * \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice. + * \param srcDevice - The source device of the target link. + * \param dstDevice - The destination device of the target link. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuCtxEnablePeerAccess, + * ::cuCtxDisablePeerAccess, + * ::cuDeviceCanAccessPeer, + * ::cudaDeviceGetP2PAttribute + */ +CUresult CUDAAPI cuDeviceGetP2PAttribute(int* value, CUdevice_P2PAttribute attrib, CUdevice srcDevice, CUdevice dstDevice); + +/** @} */ /* END CUDA_PEER_ACCESS */ + +/** + * \defgroup CUDA_GRAPHICS Graphics Interoperability + * + * ___MANBRIEF___ graphics interoperability functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graphics interoperability functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Unregisters a graphics resource for access by CUDA + * + * Unregisters the graphics resource \p resource so it is not accessible by + * CUDA unless registered again. + * + * If \p resource is invalid then ::CUDA_ERROR_INVALID_HANDLE is + * returned. + * + * \param resource - Resource to unregister + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cuGraphicsD3D9RegisterResource, + * ::cuGraphicsD3D10RegisterResource, + * ::cuGraphicsD3D11RegisterResource, + * ::cuGraphicsGLRegisterBuffer, + * ::cuGraphicsGLRegisterImage, + * ::cudaGraphicsUnregisterResource + */ +CUresult CUDAAPI cuGraphicsUnregisterResource(CUgraphicsResource resource); + +/** + * \brief Get an array through which to access a subresource of a mapped graphics resource. 
+ * + * Returns in \p *pArray an array through which the subresource of the mapped + * graphics resource \p resource which corresponds to array index \p arrayIndex + * and mipmap level \p mipLevel may be accessed. The value set in \p *pArray may + * change every time that \p resource is mapped. + * + * If \p resource is not a texture then it cannot be accessed via an array and + * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. + * If \p arrayIndex is not a valid array index for \p resource then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * If \p mipLevel is not a valid mipmap level for \p resource then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. + * + * \param pArray - Returned array through which a subresource of \p resource may be accessed + * \param resource - Mapped resource to access + * \param arrayIndex - Array index for array textures or cubemap face + * index as defined by ::CUarray_cubemap_face for + * cubemap textures for the subresource to access + * \param mipLevel - Mipmap level for the subresource to access + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED, + * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY + * \notefnerr + * + * \sa + * ::cuGraphicsResourceGetMappedPointer, + * ::cudaGraphicsSubResourceGetMappedArray + */ +CUresult CUDAAPI cuGraphicsSubResourceGetMappedArray(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel); + +/** + * \brief Get a mipmapped array through which to access a mapped graphics resource. + * + * Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics + * resource \p resource. The value set in \p *pMipmappedArray may change every time + * that \p resource is mapped. + * + * If \p resource is not a texture then it cannot be accessed via a mipmapped array and + * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. + * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. + * + * \param pMipmappedArray - Returned mipmapped array through which \p resource may be accessed + * \param resource - Mapped resource to access + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED, + * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY + * \notefnerr + * + * \sa + * ::cuGraphicsResourceGetMappedPointer, + * ::cudaGraphicsResourceGetMappedMipmappedArray + */ +CUresult CUDAAPI cuGraphicsResourceGetMappedMipmappedArray(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource); + +/** + * \brief Get a device pointer through which to access a mapped graphics resource. + * + * Returns in \p *pDevPtr a pointer through which the mapped graphics resource + * \p resource may be accessed. + * Returns in \p pSize the size of the memory in bytes which may be accessed from that pointer. + * The value set in \p pPointer may change every time that \p resource is mapped. + * + * If \p resource is not a buffer then it cannot be accessed via a pointer and + * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned. + * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. 
+ * * + * \param pDevPtr - Returned pointer through which \p resource may be accessed + * \param pSize - Returned size of the buffer accessible starting at \p *pPointer + * \param resource - Mapped resource to access + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED, + * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER + * \notefnerr + * + * \sa + * ::cuGraphicsMapResources, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsResourceGetMappedPointer + */ +CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr *pDevPtr, size_t *pSize, CUgraphicsResource resource); + +/** + * \brief Set usage flags for mapping a graphics resource + * + * Set \p flags for mapping the graphics resource \p resource. + * + * Changes to \p flags will take effect the next time \p resource is mapped. + * The \p flags argument may be any of the following: + + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA kernels. This is the default value. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that CUDA kernels which + * access this resource will not write to this resource. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies that CUDA kernels + * which access this resource will not read from this resource and will + * write over the entire contents of the resource, so none of the data + * previously stored in the resource will be preserved. + * + * If \p resource is presently mapped for access by CUDA then + * ::CUDA_ERROR_ALREADY_MAPPED is returned. + * If \p flags is not one of the above values then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * \param resource - Registered resource to set flags for + * \param flags - Parameters for resource mapping + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED + * \notefnerr + * + * \sa + * ::cuGraphicsMapResources, + * ::cudaGraphicsResourceSetMapFlags + */ +CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags); + +/** + * \brief Map graphics resources for access by CUDA + * + * Maps the \p count graphics resources in \p resources for access by CUDA. + * + * The resources in \p resources may be accessed by CUDA until they + * are unmapped. The graphics API from which \p resources were registered + * should not access any resources while they are mapped by CUDA. If an + * application does so, the results are undefined. + * + * This function provides the synchronization guarantee that any graphics calls + * issued before ::cuGraphicsMapResources() will complete before any subsequent CUDA + * work issued in \p stream begins. + * + * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. + * If any of \p resources are presently mapped for access by CUDA then ::CUDA_ERROR_ALREADY_MAPPED is returned. 
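+ *
+ * As an illustrative sketch (not part of the original documentation), the usual map / access / unmap
+ * cycle for a single registered buffer resource res on stream hStream might look like:
+ * \code
+    CUdeviceptr d_ptr;
+    size_t      size;
+
+    cuGraphicsMapResources(1, &res, hStream);                 // the graphics API must not touch res while it is mapped
+    cuGraphicsResourceGetMappedPointer(&d_ptr, &size, res);   // re-query after every map; the pointer may change
+    /* ... enqueue CUDA work on hStream that reads or writes d_ptr ... */
+    cuGraphicsUnmapResources(1, &res, hStream);               // subsequent graphics work waits for CUDA work issued before this call
+ * \endcode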
+ * + * \param count - Number of resources to map + * \param resources - Resources to map for CUDA usage + * \param hStream - Stream with which to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_UNKNOWN + * \note_null_stream + * \notefnerr + * + * \sa + * ::cuGraphicsResourceGetMappedPointer, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cuGraphicsUnmapResources, + * ::cudaGraphicsMapResources + */ +CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + +/** + * \brief Unmap graphics resources. + * + * Unmaps the \p count graphics resources in \p resources. + * + * Once unmapped, the resources in \p resources may not be accessed by CUDA + * until they are mapped again. + * + * This function provides the synchronization guarantee that any CUDA work issued + * in \p stream before ::cuGraphicsUnmapResources() will complete before any + * subsequently issued graphics work begins. + * + * + * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. + * If any of \p resources are not presently mapped for access by CUDA then ::CUDA_ERROR_NOT_MAPPED is returned. + * + * \param count - Number of resources to unmap + * \param resources - Resources to unmap + * \param hStream - Stream with which to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED, + * ::CUDA_ERROR_UNKNOWN + * \note_null_stream + * \notefnerr + * + * \sa + * ::cuGraphicsMapResources, + * ::cudaGraphicsUnmapResources + */ +CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + +/** @} */ /* END CUDA_GRAPHICS */ + +/** + * \defgroup CUDA_DRIVER_ENTRY_POINT Driver Entry Point Access + * + * ___MANBRIEF___ driver entry point access functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the driver entry point access functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns the requested driver API function pointer + * + * Returns in \p **pfn the address of the CUDA driver function for the requested + * CUDA version and flags. + * + * The CUDA version is specified as (1000 * major + 10 * minor), so CUDA 11.2 + * should be specified as 11020. For a requested driver symbol, if the specified + * CUDA version is greater than or equal to the CUDA version in which the driver symbol + * was introduced, this API will return the function pointer to the corresponding + * versioned function. + * + * The pointer returned by the API should be cast to a function pointer matching the + * requested driver function's definition in the API header file. The function pointer + * typedef can be picked up from the corresponding typedefs header file. For example, + * cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h. + * + * The API will return ::CUDA_SUCCESS and set the returned \p pfn to NULL if the + * requested driver function is not supported on the platform, no ABI + * compatible driver function exists for the specified \p cudaVersion or if the + * driver symbol is invalid. 
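+ *
+ * As an illustrative sketch (not part of the original documentation; the flag and status values used here
+ * are described below), retrieving a pointer to ::cuMemAlloc for the installed driver version might look like:
+ * \code
+    typedef CUresult (CUDAAPI *PFN_cuMemAlloc_sketch)(CUdeviceptr *dptr, size_t bytesize); // hypothetical typedef; cudaTypedefs.h provides official ones
+    PFN_cuMemAlloc_sketch pfnMemAlloc = NULL;
+    CUdriverProcAddressQueryResult symbolStatus;
+    int driverVersion = 0;
+
+    cuDriverGetVersion(&driverVersion);                       // e.g. 12020 for CUDA 12.2
+    cuGetProcAddress("cuMemAlloc", (void **)&pfnMemAlloc, driverVersion,
+                     CU_GET_PROC_ADDRESS_DEFAULT, &symbolStatus);
+    if (symbolStatus == CU_GET_PROC_ADDRESS_SUCCESS && pfnMemAlloc != NULL) {
+        /* ... call through pfnMemAlloc ... */
+    }
+ * \endcode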
+ * + * It will also set the optional \p symbolStatus to one of the values in + * ::CUdriverProcAddressQueryResult with the following meanings: + * - ::CU_GET_PROC_ADDRESS_SUCCESS - The requested symbol was succesfully found based + * on input arguments and \p pfn is valid + * - ::CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND - The requested symbol was not found + * - ::CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT - The requested symbol was found but is + * not supported by cudaVersion specified + * + * The requested flags can be: + * - ::CU_GET_PROC_ADDRESS_DEFAULT: This is the default mode. This is equivalent to + * ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM if the code is compiled with + * --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM + * is defined; ::CU_GET_PROC_ADDRESS_LEGACY_STREAM otherwise. + * - ::CU_GET_PROC_ADDRESS_LEGACY_STREAM: This will enable the search for all driver symbols + * that match the requested driver symbol name except the corresponding per-thread versions. + * - ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: This will enable the search for all + * driver symbols that match the requested driver symbol name including the per-thread + * versions. If a per-thread version is not found, the API will return the legacy version + * of the driver function. + * + * \param symbol - The base name of the driver API function to look for. As an example, + * for the driver API ::cuMemAlloc_v2, \p symbol would be cuMemAlloc and + * \p cudaVersion would be the ABI compatible CUDA version for the _v2 variant. + * \param pfn - Location to return the function pointer to the requested driver function + * \param cudaVersion - The CUDA version to look for the requested driver symbol + * \param flags - Flags to specify search options. + * \param symbolStatus - Optional location to store the status of the search for + * \p symbol based on \p cudaVersion. See ::CUdriverProcAddressQueryResult + * for possible values. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_version_mixing + * + * \sa + * ::cudaGetDriverEntryPoint + */ +CUresult CUDAAPI cuGetProcAddress(const char *symbol, void **pfn, int cudaVersion, cuuint64_t flags, CUdriverProcAddressQueryResult *symbolStatus); + +/** @} */ /* END CUDA_DRIVER_ENTRY_POINT */ + + +/** + * \defgroup CUDA_COREDUMP Coredump Attributes Control API + * + * ___MANBRIEF___ coredump attribute control functions for the low-level CUDA API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the coredump attribute control functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * Flags for choosing a coredump attribute to get/set + */ +typedef enum CUcoredumpSettings_enum { + CU_COREDUMP_ENABLE_ON_EXCEPTION = 1, + CU_COREDUMP_TRIGGER_HOST, + CU_COREDUMP_LIGHTWEIGHT, + CU_COREDUMP_ENABLE_USER_TRIGGER, + CU_COREDUMP_FILE, + CU_COREDUMP_PIPE, + CU_COREDUMP_MAX +} CUcoredumpSettings; + +/** + * \brief Allows caller to fetch a coredump attribute value for the current context + * + * Returns in \p *value the requested value specified by \p attrib. It is up to the caller + * to ensure that the data type and size of \p *value matches the request. + * + * If the caller calls this function with \p *value equal to NULL, the size of the memory + * region (in bytes) expected for \p attrib will be placed in \p size. 
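+ *
+ * As an illustrative sketch (not part of the original documentation, and assuming that passing NULL for
+ * \p value requests only the size, as described above), the two-call pattern for a string-valued attribute
+ * such as ::CU_COREDUMP_FILE might look like:
+ * \code
+    size_t size = 0;
+    cuCoredumpGetAttribute(CU_COREDUMP_FILE, NULL, &size);    // first call: query the required size in bytes
+    char *path = (char *)malloc(size);                        // caller owns the buffer
+    cuCoredumpGetAttribute(CU_COREDUMP_FILE, path, &size);    // second call: fetch the current coredump path
+ * \endcode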
+ * + * The supported attributes are: + * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + * The default value is ::false unless set to ::true globally or locally, or the + * CU_CTX_USER_COREDUMP_ENABLE flag was set during context creation. + * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + * also create a coredump. The default value is ::true unless set to ::false globally or + * or locally. + * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + * will not have a dump of GPU memory or non-reloc ELF images. The default value is + * ::false unless set to ::true globally or locally. + * - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be + * created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default + * value is ::false unless set to ::true globally or locally. + * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where + * any coredumps generated by this context will be written. The default value is + * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + * the CUDA applications and ::PID is the process ID of the CUDA application. + * - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe + * that will be monitored if user-triggered coredumps are enabled. The default value is + * ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + * the CUDA application and ::PID is the process ID of the CUDA application. + * + * \param attrib - The enum defining which value to fetch. + * \param value - void* containing the requested data. + * \param size - The size of the memory region \p value points to. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa + * ::cuCoredumpGetAttributeGlobal, + * ::cuCoredumpSetAttribute, + * ::cuCoredumpSetAttributeGlobal + */ +CUresult CUDAAPI cuCoredumpGetAttribute(CUcoredumpSettings attrib, void* value, size_t *size); + +/** + * \brief Allows caller to fetch a coredump attribute value for the entire application + * + * Returns in \p *value the requested value specified by \p attrib. It is up to the caller + * to ensure that the data type and size of \p *value matches the request. + * + * If the caller calls this function with \p *value equal to NULL, the size of the memory + * region (in bytes) expected for \p attrib will be placed in \p size. + * + * The supported attributes are: + * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + * The default value is ::false. + * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + * also create a coredump. The default value is ::true. + * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + * will not have a dump of GPU memory or non-reloc ELF images. The default value is + * ::false. + * - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be + * created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default + * value is ::false. 
+ * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where
+ * any coredumps generated by this context will be written. The default value is
+ * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ * - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe
+ * that will be monitored if user-triggered coredumps are enabled. The default value is
+ * ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ *
+ * \param attrib - The enum defining which value to fetch.
+ * \param value - void* containing the requested data.
+ * \param size - The size of the memory region \p value points to.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE
+ *
+ * \sa
+ * ::cuCoredumpGetAttribute,
+ * ::cuCoredumpSetAttribute,
+ * ::cuCoredumpSetAttributeGlobal
+ */
+CUresult CUDAAPI cuCoredumpGetAttributeGlobal(CUcoredumpSettings attrib, void *value, size_t *size);
+
+/**
+ * \brief Allows caller to set a coredump attribute value for the current context
+ *
+ * This function should be considered an alternate interface to the CUDA-GDB environment
+ * variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump
+ *
+ * An important design decision to note is that any coredump environment variable values
+ * set before CUDA initializes will take permanent precedence over any values set with this
+ * function. This decision was made to ensure no change in behavior for any users that
+ * may be currently using these variables to get coredumps.
+ *
+ * \p *value shall contain the requested value specified by \p attrib. It is up to the caller
+ * to ensure that the data type and size of \p *value matches the request.
+ *
+ * If the caller calls this function with \p *value equal to NULL, the size of the memory
+ * region (in bytes) expected for \p attrib will be placed in \p size.
+ *
+ * \note This function will return ::CUDA_ERROR_NOT_SUPPORTED if the caller attempts to set
+ * ::CU_COREDUMP_ENABLE_ON_EXCEPTION on a GPU with Compute Capability < 6.0. ::cuCoredumpSetAttributeGlobal
+ * works on those platforms as an alternative.
+ *
+ * \note ::CU_COREDUMP_ENABLE_USER_TRIGGER and ::CU_COREDUMP_PIPE cannot be set on a per-context basis.
+ *
+ * The supported attributes are:
+ * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from
+ * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE.
+ * The default value is ::false.
+ * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will
+ * also create a coredump. The default value is ::true.
+ * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps
+ * will not have a dump of GPU memory or non-reloc ELF images. The default value is
+ * ::false.
+ * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where
+ * any coredumps generated by this context will be written. The default value is
+ * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ *
+ * \param attrib - The enum defining which value to set.
+ * \param value - void* containing the requested data.
+ * \param size - The size of the memory region \p value points to. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa + * ::cuCoredumpGetAttributeGlobal, + * ::cuCoredumpGetAttribute, + * ::cuCoredumpSetAttributeGlobal + */ +CUresult CUDAAPI cuCoredumpSetAttribute(CUcoredumpSettings attrib, void* value, size_t *size); + +/** + * \brief Allows caller to set a coredump attribute value globally + * + * This function should be considered an alternate interface to the CUDA-GDB environment + * variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump + * + * An important design decision to note is that any coredump environment variable values + * set before CUDA initializes will take permanent precedence over any values set with this + * this function. This decision was made to ensure no change in behavior for any users that + * may be currently using these variables to get coredumps. + * + * \p *value shall contain the requested value specified by \p set. It is up to the caller + * to ensure that the data type and size of \p *value matches the request. + * + * If the caller calls this function with \p *value equal to NULL, the size of the memory + * region (in bytes) expected for \p set will be placed in \p size. + * + * The supported attributes are: + * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + * The default value is ::false. + * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + * also create a coredump. The default value is ::true. + * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + * will not have a dump of GPU memory or non-reloc ELF images. The default value is + * ::false. + * - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be + * created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default + * value is ::false. + * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where + * any coredumps generated by this context will be written. The default value is + * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + * the CUDA applications and ::PID is the process ID of the CUDA application. + * - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe + * that will be monitored if user-triggered coredumps are enabled. This value may not be + * changed after ::CU_COREDUMP_ENABLE_USER_TRIGGER is set to ::true. The default + * value is ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine + * running the CUDA application and ::PID is the process ID of the CUDA application. + * + * \param attrib - The enum defining which value to set. + * \param value - void* containing the requested data. + * \param size - The size of the memory region \p value points to. 
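+ *
+ * As an illustrative sketch (not part of the original documentation), redirecting coredumps for the whole
+ * application before any contexts are created might look like the following; the path used is purely a
+ * placeholder, and string attributes are assumed to be passed as NUL-terminated buffers:
+ * \code
+    const char *path = "/tmp/my-app-gpu-core";               // hypothetical coredump location
+    size_t size = strlen(path) + 1;
+    cuCoredumpSetAttributeGlobal(CU_COREDUMP_FILE, (void *)path, &size);
+ * \endcode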
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_PERMITTED + * + * \sa + * ::cuCoredumpGetAttribute, + * ::cuCoredumpGetAttributeGlobal, + * ::cuCoredumpSetAttribute + */ +CUresult CUDAAPI cuCoredumpSetAttributeGlobal(CUcoredumpSettings attrib, void *value, size_t *size); + +/** @} */ /* END CUDA_COREDUMP */ + +CUresult CUDAAPI cuGetExportTable(const void **ppExportTable, const CUuuid *pExportTableId); + +/** + * CUDA API versioning support + */ +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cuMemHostRegister + #undef cuGraphicsResourceSetMapFlags + #undef cuLinkCreate + #undef cuLinkAddData + #undef cuLinkAddFile + #undef cuDeviceTotalMem + #undef cuCtxCreate + #undef cuModuleGetGlobal + #undef cuMemGetInfo + #undef cuMemAlloc + #undef cuMemAllocPitch + #undef cuMemFree + #undef cuMemGetAddressRange + #undef cuMemAllocHost + #undef cuMemHostGetDevicePointer + #undef cuMemcpyHtoD + #undef cuMemcpyDtoH + #undef cuMemcpyDtoD + #undef cuMemcpyDtoA + #undef cuMemcpyAtoD + #undef cuMemcpyHtoA + #undef cuMemcpyAtoH + #undef cuMemcpyAtoA + #undef cuMemcpyHtoAAsync + #undef cuMemcpyAtoHAsync + #undef cuMemcpy2D + #undef cuMemcpy2DUnaligned + #undef cuMemcpy3D + #undef cuMemcpyHtoDAsync + #undef cuMemcpyDtoHAsync + #undef cuMemcpyDtoDAsync + #undef cuMemcpy2DAsync + #undef cuMemcpy3DAsync + #undef cuMemsetD8 + #undef cuMemsetD16 + #undef cuMemsetD32 + #undef cuMemsetD2D8 + #undef cuMemsetD2D16 + #undef cuMemsetD2D32 + #undef cuArrayCreate + #undef cuArrayGetDescriptor + #undef cuArray3DCreate + #undef cuArray3DGetDescriptor + #undef cuTexRefSetAddress + #undef cuTexRefSetAddress2D + #undef cuTexRefGetAddress + #undef cuGraphicsResourceGetMappedPointer + #undef cuCtxDestroy + #undef cuCtxPopCurrent + #undef cuCtxPushCurrent + #undef cuStreamDestroy + #undef cuEventDestroy + #undef cuMemcpy + #undef cuMemcpyAsync + #undef cuMemcpyPeer + #undef cuMemcpyPeerAsync + #undef cuMemcpy3DPeer + #undef cuMemcpy3DPeerAsync + #undef cuMemsetD8Async + #undef cuMemsetD16Async + #undef cuMemsetD32Async + #undef cuMemsetD2D8Async + #undef cuMemsetD2D16Async + #undef cuMemsetD2D32Async + #undef cuStreamGetPriority + #undef cuStreamGetId + #undef cuStreamGetFlags + #undef cuStreamGetCtx + #undef cuStreamWaitEvent + #undef cuStreamAddCallback + #undef cuStreamAttachMemAsync + #undef cuStreamQuery + #undef cuStreamSynchronize + #undef cuEventRecord + #undef cuEventRecordWithFlags + #undef cuLaunchKernel + #undef cuLaunchKernelEx + #undef cuLaunchHostFunc + #undef cuGraphicsMapResources + #undef cuGraphicsUnmapResources + #undef cuStreamWriteValue32 + #undef cuStreamWaitValue32 + #undef cuStreamWriteValue64 + #undef cuStreamWaitValue64 + #undef cuStreamBatchMemOp + #undef cuStreamWriteValue32_v2 + #undef cuStreamWaitValue32_v2 + #undef cuStreamWriteValue64_v2 + #undef cuStreamWaitValue64_v2 + #undef cuStreamBatchMemOp_v2 + #undef cuMemPrefetchAsync + #undef cuLaunchCooperativeKernel + #undef cuSignalExternalSemaphoresAsync + #undef cuWaitExternalSemaphoresAsync + #undef cuStreamBeginCapture + #undef cuStreamEndCapture + #undef cuStreamIsCapturing + #undef cuStreamGetCaptureInfo + #undef cuStreamGetCaptureInfo_v2 + #undef cuGraphInstantiateWithParams + #undef cuGraphExecUpdate + #undef cuGraphUpload + #undef cuGraphLaunch + #undef cuDevicePrimaryCtxRelease + #undef cuDevicePrimaryCtxReset + #undef cuDevicePrimaryCtxSetFlags + #undef cuIpcOpenMemHandle + #undef cuStreamCopyAttributes + #undef cuStreamSetAttribute + #undef cuStreamGetAttribute + #undef cuGraphInstantiate + #undef 
cuGraphAddKernelNode + #undef cuGraphKernelNodeGetParams + #undef cuGraphKernelNodeSetParams + #undef cuGraphExecKernelNodeSetParams + #undef cuMemMapArrayAsync + #undef cuMemFreeAsync + #undef cuMemAllocAsync + #undef cuMemAllocFromPoolAsync + #undef cuStreamUpdateCaptureDependencies + #undef cuGetProcAddress + + CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); + CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags); + CUresult CUDAAPI cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); + CUresult CUDAAPI cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, + unsigned int numOptions, CUjit_option *options, void **optionValues); + CUresult CUDAAPI cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path, + unsigned int numOptions, CUjit_option *options, void **optionValues); + CUresult CUDAAPI cuTexRefSetAddress2D_v2(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch); + + typedef unsigned int CUdeviceptr_v1; + + typedef struct CUDA_MEMCPY2D_v1_st + { + unsigned int srcXInBytes; /**< Source X in bytes */ + unsigned int srcY; /**< Source Y */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr_v1 srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + unsigned int srcPitch; /**< Source pitch (ignored when src is array) */ + + unsigned int dstXInBytes; /**< Destination X in bytes */ + unsigned int dstY; /**< Destination Y */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr_v1 dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */ + + unsigned int WidthInBytes; /**< Width of 2D memory copy in bytes */ + unsigned int Height; /**< Height of 2D memory copy */ + } CUDA_MEMCPY2D_v1; + + typedef struct CUDA_MEMCPY3D_v1_st + { + unsigned int srcXInBytes; /**< Source X in bytes */ + unsigned int srcY; /**< Source Y */ + unsigned int srcZ; /**< Source Z */ + unsigned int srcLOD; /**< Source LOD */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr_v1 srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + void *reserved0; /**< Must be NULL */ + unsigned int srcPitch; /**< Source pitch (ignored when src is array) */ + unsigned int srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ + + unsigned int dstXInBytes; /**< Destination X in bytes */ + unsigned int dstY; /**< Destination Y */ + unsigned int dstZ; /**< Destination Z */ + unsigned int dstLOD; /**< Destination LOD */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr_v1 dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + void *reserved1; /**< Must be NULL */ + unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */ + unsigned int dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ + + unsigned int WidthInBytes; /**< Width 
of 3D memory copy in bytes */ + unsigned int Height; /**< Height of 3D memory copy */ + unsigned int Depth; /**< Depth of 3D memory copy */ + } CUDA_MEMCPY3D_v1; + + typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st + { + unsigned int Width; /**< Width of array */ + unsigned int Height; /**< Height of array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ + } CUDA_ARRAY_DESCRIPTOR_v1; + + typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st + { + unsigned int Width; /**< Width of 3D array */ + unsigned int Height; /**< Height of 3D array */ + unsigned int Depth; /**< Depth of 3D array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ + unsigned int Flags; /**< Flags */ + } CUDA_ARRAY3D_DESCRIPTOR_v1; + + CUresult CUDAAPI cuDeviceTotalMem(unsigned int *bytes, CUdevice dev); + CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); + CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name); + CUresult CUDAAPI cuMemGetInfo(unsigned int *free, unsigned int *total); + CUresult CUDAAPI cuMemAlloc(CUdeviceptr_v1 *dptr, unsigned int bytesize); + CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes); + CUresult CUDAAPI cuMemFree(CUdeviceptr_v1 dptr); + CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr); + CUresult CUDAAPI cuMemAllocHost(void **pp, unsigned int bytesize); + CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags); + CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D_v1 *pCopy); + CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D_v1 *pCopy); + CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D_v1 *pCopy); + CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI 
cuMemcpy2DAsync(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemsetD8(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N); + CUresult CUDAAPI cuMemsetD16(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N); + CUresult CUDAAPI cuMemsetD32(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N); + CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height); + CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height); + CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height); + CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray); + CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray); + CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + CUresult CUDAAPI cuTexRefSetAddress(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes); + CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch); + CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr_v1 *pdptr, CUtexref hTexRef); + CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource); + + CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); + CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); + CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); + CUresult CUDAAPI cuStreamDestroy(CUstream hStream); + CUresult CUDAAPI cuEventDestroy(CUevent hEvent); + CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); + CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); + CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); + + CUresult CUDAAPI cuMemcpyHtoD_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); + CUresult CUDAAPI cuMemcpyDtoH_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); + CUresult CUDAAPI cuMemcpyDtoD_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); + CUresult CUDAAPI cuMemcpyDtoA_v2(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAtoD_v2(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); + CUresult CUDAAPI cuMemcpyHtoA_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAtoH_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAtoA_v2(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); + CUresult CUDAAPI cuMemcpyHtoAAsync_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyAtoHAsync_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy2D_v2(const CUDA_MEMCPY2D *pCopy); + CUresult CUDAAPI cuMemcpy2DUnaligned_v2(const CUDA_MEMCPY2D *pCopy); + CUresult CUDAAPI cuMemcpy3D_v2(const CUDA_MEMCPY3D *pCopy); + CUresult CUDAAPI 
cuMemcpyHtoDAsync_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoHAsync_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoDAsync_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy2DAsync_v2(const CUDA_MEMCPY2D *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemcpy3DAsync_v2(const CUDA_MEMCPY3D *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemsetD8_v2(CUdeviceptr dstDevice, unsigned char uc, size_t N); + CUresult CUDAAPI cuMemsetD16_v2(CUdeviceptr dstDevice, unsigned short us, size_t N); + CUresult CUDAAPI cuMemsetD32_v2(CUdeviceptr dstDevice, unsigned int ui, size_t N); + CUresult CUDAAPI cuMemsetD2D8_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); + CUresult CUDAAPI cuMemsetD2D16_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); + CUresult CUDAAPI cuMemsetD2D32_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); + CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount); + CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy); + CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream); + + CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); + CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); + CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); + CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); + CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); + CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); + + CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); + CUresult CUDAAPI cuStreamGetId(CUstream hStream, unsigned long long *streamId); + CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); + CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx); + CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); + CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); + CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); + CUresult CUDAAPI cuStreamQuery(CUstream hStream); + CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); + CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); + CUresult CUDAAPI cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags); + CUresult CUDAAPI cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int 
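/*
 * A kernel-launch sketch for cuLaunchKernel as declared here. kernel, stream and
 * dOut are assumed to exist already (e.g. from cuModuleGetFunction, cuStreamCreate
 * and cuMemAlloc); error checking is omitted:
 *
 *   int n = 1 << 20;
 *   void *params[] = { &dOut, &n };          // one pointer per kernel argument
 *   cuLaunchKernel(kernel,
 *                  (n + 255) / 256, 1, 1,    // grid dimensions
 *                  256, 1, 1,                // block dimensions
 *                  0, stream,                // dynamic shared memory, stream
 *                  params, NULL);            // kernelParams, extra
 *   cuStreamSynchronize(stream);
 */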
gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); + CUresult CUDAAPI cuLaunchKernelEx(const CUlaunchConfig *config, CUfunction f, void **kernelParams, void **extra); + CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData); + CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + + CUresult CUDAAPI cuStreamWriteValue32_ptsz(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue32_ptsz(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWriteValue64_ptsz(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue64_ptsz(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamBatchMemOp_ptsz(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + + CUresult CUDAAPI cuStreamWriteValue32_v2(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue32_v2(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWriteValue64_v2(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue64_v2(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamBatchMemOp_v2(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); + CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); + CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream); + CUresult CUDAAPI cuStreamBeginCapture_ptsz(CUstream hStream); + CUresult CUDAAPI cuStreamBeginCapture_v2(CUstream hStream, CUstreamCaptureMode mode); + CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); + CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); + CUresult 
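/*
 * A stream-capture sketch built only from the capture and graph entry points
 * declared in this block; hStream and the enqueued async work are assumed to
 * exist and error checking is omitted:
 *
 *   CUgraph graph;
 *   CUgraphExec graphExec;
 *   cuStreamBeginCapture_v2(hStream, CU_STREAM_CAPTURE_MODE_GLOBAL);
 *   // ... enqueue async copies / kernel launches on hStream ...
 *   cuStreamEndCapture(hStream, &graph);
 *   cuGraphInstantiate_v2(&graphExec, graph, NULL, NULL, 0);
 *   cuGraphLaunch(graphExec, hStream);
 *   cuStreamSynchronize(hStream);
 */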
CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); + CUresult CUDAAPI cuStreamGetCaptureInfo_ptsz(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); + CUresult CUDAAPI cuStreamGetCaptureInfo_v2(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); + CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphInstantiateWithParams(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); + CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out); + CUresult CUDAAPI cuGraphUpload(CUgraphExec hGraph, CUstream hStream); + CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraph, CUstream hStream); + CUresult CUDAAPI cuStreamCopyAttributes(CUstream dstStream, CUstream srcStream); + CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue *value); + CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue *param); + + CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); + CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + CUresult CUDAAPI cuGraphInstantiate_v2(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + + CUresult CUDAAPI cuMemMapArrayAsync(CUarrayMapInfo *mapInfoList, unsigned int count, CUstream hStream); + + CUresult CUDAAPI cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream); + CUresult CUDAAPI cuMemAllocAsync(CUdeviceptr *dptr, size_t bytesize, CUstream hStream); + CUresult CUDAAPI cuMemAllocFromPoolAsync(CUdeviceptr *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); + + CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); + CUresult CUDAAPI cuGetProcAddress(const char *symbol, void **pfn, int cudaVersion, cuuint64_t flags); + +#elif defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM) +static inline CUresult cuGetProcAddress_v2_ptsz(const char *symbol, void **funcPtr, int driverVersion, cuuint64_t flags, CUdriverProcAddressQueryResult *symbolStatus) { + const int procAddressMask = (CU_GET_PROC_ADDRESS_LEGACY_STREAM| + CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM); + if ((flags & procAddressMask) == 0) { + flags |= CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM; + } + return cuGetProcAddress_v2(symbol, funcPtr, driverVersion, flags, symbolStatus); +} +#define cuGetProcAddress_v2 cuGetProcAddress_v2_ptsz +#endif + +#ifdef __cplusplus +} +#endif + +#if defined(__GNUC__) + #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT) + #pragma GCC visibility pop + #endif +#endif + +#undef __CUDA_DEPRECATED + +#endif /* 
__cuda_cuda_h__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h new file mode 100644 index 0000000000000000000000000000000000000000..e304ae904c42dc2f05fa403b15ec02c0bb6e376c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h @@ -0,0 +1,659 @@ +/* + * Copyright 2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAEGL_H +#define CUDAEGL_H + +#include "cuda.h" +#include "EGL/egl.h" +#include "EGL/eglext.h" + + +#ifdef CUDA_FORCE_API_VERSION +#error "CUDA_FORCE_API_VERSION is no longer supported." 
+#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \addtogroup CUDA_TYPES + * @{ + */ + +/** + * Maximum number of planes per frame + */ +#define MAX_PLANES 3 + +/** + * CUDA EglFrame type - array or pointer + */ +typedef enum CUeglFrameType_enum { + CU_EGL_FRAME_TYPE_ARRAY = 0, /**< Frame type CUDA array */ + CU_EGL_FRAME_TYPE_PITCH = 1, /**< Frame type pointer */ +} CUeglFrameType; + +/** + * Indicates that timeout for ::cuEGLStreamConsumerAcquireFrame is infinite. + */ +#define CUDA_EGL_INFINITE_TIMEOUT 0xFFFFFFFF + +/** + * Resource location flags- sysmem or vidmem + * + * For CUDA context on iGPU, since video and system memory are equivalent - + * these flags will not have an effect on the execution. + * + * For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags + * to give a hint about the desired location. + * + * ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory + * to be accessed by CUDA. + * + * ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated + * video memory to be accessed by CUDA. + * + * There may be an additional latency due to new allocation and data migration, + * if the frame is produced on a different memory. + + */ +typedef enum CUeglResourceLocationFlags_enum { + CU_EGL_RESOURCE_LOCATION_SYSMEM = 0x00, /**< Resource location sysmem */ + CU_EGL_RESOURCE_LOCATION_VIDMEM = 0x01 /**< Resource location vidmem */ +} CUeglResourceLocationFlags; + +/** + * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. + * Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY + */ +typedef enum CUeglColorFormat_enum { + CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0x00, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = 0x01, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */ + CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 0x02, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 0x03, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */ + CU_EGL_COLOR_FORMAT_RGB = 0x04, /**< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_BGR = 0x05, /**< R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_ARGB = 0x06, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */ + CU_EGL_COLOR_FORMAT_RGBA = 0x07, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */ + CU_EGL_COLOR_FORMAT_L = 0x08, /**< single luminance channel in one surface. */ + CU_EGL_COLOR_FORMAT_R = 0x09, /**< single color channel in one surface. */ + CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 0x0A, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 0x0B, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */ + CU_EGL_COLOR_FORMAT_YUYV_422 = 0x0C, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */ + CU_EGL_COLOR_FORMAT_UYVY_422 = 0x0D, /**< Y, U, V in one surface, interleaved as YUYV in one channel. 
*/ + CU_EGL_COLOR_FORMAT_ABGR = 0x0E, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */ + CU_EGL_COLOR_FORMAT_BGRA = 0x0F, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */ + CU_EGL_COLOR_FORMAT_A = 0x10, /**< Alpha color format - one channel in one surface. */ + CU_EGL_COLOR_FORMAT_RG = 0x11, /**< R/G color format - two channels in one surface with GR byte ordering */ + CU_EGL_COLOR_FORMAT_AYUV = 0x12, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */ + CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 0x13, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 0x14, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 0x15, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 0x16, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 0x17, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 0x18, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 0x19, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_VYUY_ER = 0x1A, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */ + CU_EGL_COLOR_FORMAT_UYVY_ER = 0x1B, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */ + CU_EGL_COLOR_FORMAT_YUYV_ER = 0x1C, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */ + CU_EGL_COLOR_FORMAT_YVYU_ER = 0x1D, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */ + CU_EGL_COLOR_FORMAT_YUV_ER = 0x1E, /**< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_YUVA_ER = 0x1F, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */ + CU_EGL_COLOR_FORMAT_AYUV_ER = 0x20, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */ + CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 0x21, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 0x22, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 0x23, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 0x24, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 0x25, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. 
*/ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 0x26, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 0x27, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 0x28, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 0x29, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 0x2A, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 0x2B, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 0x2C, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_BAYER_RGGB = 0x2D, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_BGGR = 0x2E, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_GRBG = 0x2F, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_GBRG = 0x30, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */ + CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 0x31, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 0x32, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 0x33, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 0x34, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 0x35, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 0x36, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 0x37, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 0x38, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 0x39, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 0x3A, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 0x3B, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. 
*/ + CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 0x3C, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 0x3D, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 0x3E, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 0x3F, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 0x40, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 0x41, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 0x42, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 0x43, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 0x44, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 0x45, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 0x46, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 0x47, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */ + CU_EGL_COLOR_FORMAT_BAYER_BCCR = 0x48, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_RCCB = 0x49, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_CRBC = 0x4A, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */ + CU_EGL_COLOR_FORMAT_BAYER_CBRC = 0x4B, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */ + CU_EGL_COLOR_FORMAT_BAYER10_CCCC = 0x4C, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_BCCR = 0x4D, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_RCCB = 0x4E, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_CRBC = 0x4F, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_CBRC = 0x50, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_BAYER12_CCCC = 0x51, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. 
Out of 16 bits, 12 bits used 4 bits No-op. */ + CU_EGL_COLOR_FORMAT_Y = 0x52, /**< Color format for single Y plane. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = 0x53, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = 0x54, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = 0x55, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = 0x56, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height += 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = 0x57, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = 0x58, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = 0x59, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height += 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = 0x5A, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = 0x5B, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = 0x5C, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = 0x5D, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = 0x5E, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = 0x5F, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y_ER = 0x60, /**< Extended Range Color format for single Y plane. */ + CU_EGL_COLOR_FORMAT_Y_709_ER = 0x61, /**< Extended Range Color format for single Y plane. */ + CU_EGL_COLOR_FORMAT_Y10_ER = 0x62, /**< Extended Range Color format for single Y10 plane. */ + CU_EGL_COLOR_FORMAT_Y10_709_ER = 0x63, /**< Extended Range Color format for single Y10 plane. */ + CU_EGL_COLOR_FORMAT_Y12_ER = 0x64, /**< Extended Range Color format for single Y12 plane. */ + CU_EGL_COLOR_FORMAT_Y12_709_ER = 0x65, /**< Extended Range Color format for single Y12 plane. */ + CU_EGL_COLOR_FORMAT_YUVA = 0x66, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */ + CU_EGL_COLOR_FORMAT_YUV = 0x67, /**< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */ + CU_EGL_COLOR_FORMAT_YVYU = 0x68, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */ + CU_EGL_COLOR_FORMAT_VYUY = 0x69, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = 0x6A, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = 0x6B, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
*/ + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = 0x6C, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = 0x6D, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = 0x6E, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = 0x6F, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = 0x70, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = 0x71, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + CU_EGL_COLOR_FORMAT_MAX +} CUeglColorFormat; + +/** + * CUDA EGLFrame structure Descriptor - structure defining one frame of EGL. + * + * Each frame may contain one or more planes depending on whether the surface * is Multiplanar or not. + */ +typedef struct CUeglFrame_st { + union { + CUarray pArray[MAX_PLANES]; /**< Array of CUarray corresponding to each plane*/ + void* pPitch[MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/ + } frame; + unsigned int width; /**< Width of first plane */ + unsigned int height; /**< Height of first plane */ + unsigned int depth; /**< Depth of first plane */ + unsigned int pitch; /**< Pitch of first plane */ + unsigned int planeCount; /**< Number of planes */ + unsigned int numChannels; /**< Number of channels for the plane */ + CUeglFrameType frameType; /**< Array or Pitch */ + CUeglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/ + CUarray_format cuFormat; /**< CUDA Array Format*/ +} CUeglFrame_v1; +typedef CUeglFrame_v1 CUeglFrame; + +/** + * CUDA EGLSream Connection + */ +typedef struct CUeglStreamConnection_st* CUeglStreamConnection; + +/** @} */ /* END CUDA_TYPES */ + +/** + * \file cudaEGL.h + * \brief Header file for the EGL interoperability functions of the + * low-level CUDA driver application programming interface. + */ + +/** + * \defgroup CUDA_EGL EGL Interoperability + * \ingroup CUDA_DRIVER + * + * ___MANBRIEF___ EGL interoperability functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the EGL interoperability functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Registers an EGL image + * + * Registers the EGLImageKHR specified by \p image for access by + * CUDA. A handle to the registered object is returned as \p pCudaResource. + * Additional Mapping/Unmapping is not required for the registered resource and + * ::cuGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource. + * + * The application will be responsible for synchronizing access to shared objects. + * The application must ensure that any pending operation which access the objects have completed + * before passing control to CUDA. This may be accomplished by issuing and waiting for + * glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). 
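 *
 * A minimal registration sketch (eglImage is an assumed, already-created
 * EGLImageKHR; error checking omitted):
 * \code
 * CUgraphicsResource cudaResource;
 * CUeglFrame eglFrame;
 * cuGraphicsEGLRegisterImage(&cudaResource, eglImage,
 *                            CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
 * cuGraphicsResourceGetMappedEglFrame(&eglFrame, cudaResource, 0, 0);
 * // eglFrame.frame.pArray[0] (or pPitch[0]) now exposes plane 0 to CUDA
 * cuGraphicsUnregisterResource(cudaResource);
 * \endcode
 *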
+ * The application will be also responsible for ensuring that any pending operation on the + * registered CUDA resource has completed prior to executing subsequent commands in other APIs + * accesing the same memory objects. + * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably). + * + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer. + * typedef void* EGLImageKHR + * + * \param pCudaResource - Pointer to the returned object handle + * \param image - An EGLImageKHR image which can be used to create target resource. + * \param flags - Map flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuGraphicsEGLRegisterImage, ::cuGraphicsUnregisterResource, + * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + * ::cuGraphicsUnmapResources, + * ::cudaGraphicsEGLRegisterImage + */ +CUresult CUDAAPI cuGraphicsEGLRegisterImage(CUgraphicsResource *pCudaResource, EGLImageKHR image, unsigned int flags); + +/** + * \brief Connect CUDA to EGLStream as a consumer. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param stream - EGLStreamKHR handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerConnect + */ +CUresult CUDAAPI cuEGLStreamConsumerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream); + +/** + * \brief Connect CUDA to EGLStream as a consumer with given flags. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags. + * + * The flags specify whether the consumer wants to access frames from system memory or video memory. + * Default is ::CU_EGL_RESOURCE_LOCATION_VIDMEM. + * + * \param conn - Pointer to the returned connection handle + * \param stream - EGLStreamKHR handle + * \param flags - Flags denote intended location - system or video. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerConnectWithFlags + */ + +CUresult CUDAAPI cuEGLStreamConsumerConnectWithFlags(CUeglStreamConnection *conn, EGLStreamKHR stream, unsigned int flags); + +/** + * \brief Disconnect CUDA as a consumer to EGLStream . 
+ * + * Disconnect CUDA as a consumer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerDisconnect + */ +CUresult CUDAAPI cuEGLStreamConsumerDisconnect(CUeglStreamConnection *conn); + +/** + * \brief Acquire an image frame from the EGLStream with CUDA as a consumer. + * + * Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented + * by the producer unless explicitly disabled by setting EGL_SUPPORT_REUSE_NV flag to EGL_FALSE + * during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE. + * ::cuGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get + * ::CUeglFrame. + * + * \param conn - Connection on which to acquire + * \param pCudaResource - CUDA resource on which the stream frame will be mapped for use. + * \param pStream - CUDA stream for synchronization and any data migrations + * implied by ::CUeglResourceLocationFlags. + * \param timeout - Desired timeout in usec for a new frame to be acquired. + * If set as ::CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely. + * After timeout occurs CUDA consumer tries to acquire an old frame + * if available and EGL_SUPPORT_REUSE_NV flag is set. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerAcquireFrame + */ +CUresult CUDAAPI cuEGLStreamConsumerAcquireFrame(CUeglStreamConnection *conn, + CUgraphicsResource *pCudaResource, CUstream *pStream, unsigned int timeout); +/** + * \brief Releases the last frame acquired from the EGLStream. + * + * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR. + * If EGL_SUPPORT_REUSE_NV flag is set to EGL_TRUE, at the time of EGL creation + * this API doesn't release the last frame acquired on the EGLStream. + * By default, EGLStream is created with this flag set to EGL_TRUE. + * + * \param conn - Connection on which to release + * \param pCudaResource - CUDA resource whose corresponding frame is to be released + * \param pStream - CUDA stream on which release will be done. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * + * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + * ::cudaEGLStreamConsumerReleaseFrame + */ +CUresult CUDAAPI cuEGLStreamConsumerReleaseFrame(CUeglStreamConnection *conn, + CUgraphicsResource pCudaResource, CUstream *pStream); + +/** + * \brief Connect CUDA to EGLStream as a producer. + * + * Connect CUDA as a producer to EGLStreamKHR specified by \p stream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. 
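 *
 * For the consumer-side calls documented above, a typical flow is sketched below;
 * eglStream is an assumed, already-created EGLStreamKHR, frame_available() is a
 * placeholder loop condition, and error checking is omitted:
 * \code
 * CUeglStreamConnection conn;
 * CUgraphicsResource resource;
 * CUeglFrame frame;
 * CUstream cudaStream;
 * cuStreamCreate(&cudaStream, 0);
 * cuEGLStreamConsumerConnectWithFlags(&conn, eglStream, CU_EGL_RESOURCE_LOCATION_VIDMEM);
 * while (frame_available()) {
 *     if (cuEGLStreamConsumerAcquireFrame(&conn, &resource, &cudaStream, 16000) != CUDA_SUCCESS)
 *         continue;  // e.g. timeout: no new frame was produced yet
 *     cuGraphicsResourceGetMappedEglFrame(&frame, resource, 0, 0);
 *     // ... process frame.frame.pArray[0] / pPitch[0] on cudaStream ...
 *     cuEGLStreamConsumerReleaseFrame(&conn, resource, &cudaStream);
 * }
 * cuEGLStreamConsumerDisconnect(&conn);
 * \endcode
 *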
+ * + * \param conn - Pointer to the returned connection handle + * \param stream - EGLStreamKHR handle + * \param width - width of the image to be submitted to the stream + * \param height - height of the image to be submitted to the stream + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + * ::cuEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerConnect + */ +CUresult CUDAAPI cuEGLStreamProducerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream, + EGLint width, EGLint height); + +/** + * \brief Disconnect CUDA as a producer to EGLStream . + * + * Disconnect CUDA as a producer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * + * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + * ::cuEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerDisconnect + */ +CUresult CUDAAPI cuEGLStreamProducerDisconnect(CUeglStreamConnection *conn); + +/** + * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer. + * + * When a frame is presented by the producer, it gets associated with the EGLStream + * and thus it is illegal to free the frame before the producer is disconnected. + * If a frame is freed and reused it may lead to undefined behavior. + * + * If producer and consumer are on different GPUs (iGPU and dGPU) then frametype + * ::CU_EGL_FRAME_TYPE_ARRAY is not supported. ::CU_EGL_FRAME_TYPE_PITCH can be used for + * such cross-device applications. + * + * The ::CUeglFrame is defined as: + * \code + * typedef struct CUeglFrame_st { + * union { + * CUarray pArray[MAX_PLANES]; + * void* pPitch[MAX_PLANES]; + * } frame; + * unsigned int width; + * unsigned int height; + * unsigned int depth; + * unsigned int pitch; + * unsigned int planeCount; + * unsigned int numChannels; + * CUeglFrameType frameType; + * CUeglColorFormat eglColorFormat; + * CUarray_format cuFormat; + * } CUeglFrame; + * \endcode + * + * For ::CUeglFrame of type ::CU_EGL_FRAME_TYPE_PITCH, the application may present sub-region of a memory + * allocation. In that case, the pitched pointer will specify the start address of the sub-region in + * the allocation and corresponding ::CUeglFrame fields will specify the dimensions of the sub-region. + * + * \param conn - Connection on which to present the CUDA array + * \param eglframe - CUDA Eglstream Proucer Frame handle to be sent to the consumer over EglStream. + * \param pStream - CUDA stream on which to present the frame. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * + * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + * ::cuEGLStreamProducerReturnFrame, + * ::cudaEGLStreamProducerPresentFrame + */ +CUresult CUDAAPI cuEGLStreamProducerPresentFrame(CUeglStreamConnection *conn, + CUeglFrame eglframe, CUstream *pStream); + +/** + * \brief Return the CUDA eglFrame to the EGLStream released by the consumer. + * + * This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not + * returned a frame to EGL stream. If timeout is returned the application can retry. + * + * \param conn - Connection on which to return + * \param eglframe - CUDA Eglstream Proucer Frame handle returned from the consumer over EglStream. + * \param pStream - CUDA stream on which to return the frame. 
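 *
 * A producer-side sketch tying the connect/present/return calls together;
 * eglStream, width, height and cuArray are assumed to exist already and error
 * checking is omitted:
 * \code
 * CUeglStreamConnection conn;
 * CUeglFrame frame = {0};
 * CUstream cudaStream;
 * cuStreamCreate(&cudaStream, 0);
 * cuEGLStreamProducerConnect(&conn, eglStream, width, height);
 * frame.frameType = CU_EGL_FRAME_TYPE_ARRAY;
 * frame.eglColorFormat = CU_EGL_COLOR_FORMAT_ARGB;
 * frame.cuFormat = CU_AD_FORMAT_UNSIGNED_INT8;
 * frame.width = width;  frame.height = height;  frame.depth = 1;
 * frame.planeCount = 1; frame.numChannels = 4;
 * frame.frame.pArray[0] = cuArray;   // CUarray filled by the application
 * cuEGLStreamProducerPresentFrame(&conn, frame, &cudaStream);
 * // once the consumer has released it, take the frame back before reuse:
 * cuEGLStreamProducerReturnFrame(&conn, &frame, &cudaStream);
 * cuEGLStreamProducerDisconnect(&conn);
 * \endcode
 *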
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_LAUNCH_TIMEOUT + * + * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + * ::cuEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerReturnFrame + */ +CUresult CUDAAPI cuEGLStreamProducerReturnFrame(CUeglStreamConnection *conn, + CUeglFrame *eglframe, CUstream *pStream); + +/** + * \brief Get an eglFrame through which to access a registered EGL graphics resource. + * + * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource + * \p resource may be accessed. + * This API can only be called for registered EGL graphics resources. + * + * The ::CUeglFrame is defined as: + * \code + * typedef struct CUeglFrame_st { + * union { + * CUarray pArray[MAX_PLANES]; + * void* pPitch[MAX_PLANES]; + * } frame; + * unsigned int width; + * unsigned int height; + * unsigned int depth; + * unsigned int pitch; + * unsigned int planeCount; + * unsigned int numChannels; + * CUeglFrameType frameType; + * CUeglColorFormat eglColorFormat; + * CUarray_format cuFormat; + * } CUeglFrame; + * \endcode + * + * If \p resource is not registered then ::CUDA_ERROR_NOT_MAPPED is returned. + * * + * \param eglFrame - Returned eglFrame. + * \param resource - Registered resource to access. + * \param index - Index for cubemap surfaces. + * \param mipLevel - Mipmap level for the subresource to access. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED + * + * \sa + * ::cuGraphicsMapResources, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cuGraphicsResourceGetMappedPointer, + * ::cudaGraphicsResourceGetMappedEglFrame + */ +CUresult CUDAAPI cuGraphicsResourceGetMappedEglFrame(CUeglFrame* eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel); + +/** + * \brief Creates an event from EGLSync object + * + * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified + * via \p flags. Valid flags include: + * - ::CU_EVENT_DEFAULT: Default event creation flag. + * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking + * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on + * an event created with this flag will block until the event has actually + * been completed. + * + * Once the \p eglSync gets destroyed, ::cuEventDestroy is the only API + * that can be invoked on the event. + * + * ::cuEventRecord and TimingData are not supported for events created from EGLSync. + * + * The EGLSyncKHR is an opaque handle to an EGL sync object. 
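 *
 * A short sketch (eglSync is an assumed EGLSyncKHR obtained from
 * eglCreateSyncKHR; error checking omitted):
 * \code
 * CUevent syncEvent;
 * cuEventCreateFromEGLSync(&syncEvent, eglSync, CU_EVENT_DEFAULT);
 * cuEventSynchronize(syncEvent);  // wait for the EGL sync object to signal
 * cuEventDestroy(syncEvent);
 * \endcode
 *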
+ * typedef void* EGLSyncKHR + * + * \param phEvent - Returns newly created event + * \param eglSync - Opaque handle to EGLSync object + * \param flags - Event creation flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventDestroy + */ +CUresult CUDAAPI cuEventCreateFromEGLSync(CUevent *phEvent, EGLSyncKHR eglSync, unsigned int flags); + +/** @} */ /* END CUDA_EGL */ + +#ifdef __cplusplus +}; +#endif + +#endif + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..61b82337dc4bb280869934b11c2105db62ae20c3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h @@ -0,0 +1,96 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAEGLTYPEDEFS_H +#define CUDAEGLTYPEDEFS_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaEGL.h + */ +#define PFN_cuGraphicsEGLRegisterImage PFN_cuGraphicsEGLRegisterImage_v7000 +#define PFN_cuEGLStreamConsumerConnect PFN_cuEGLStreamConsumerConnect_v7000 +#define PFN_cuEGLStreamConsumerConnectWithFlags PFN_cuEGLStreamConsumerConnectWithFlags_v8000 +#define PFN_cuEGLStreamConsumerDisconnect PFN_cuEGLStreamConsumerDisconnect_v7000 +#define PFN_cuEGLStreamConsumerAcquireFrame PFN_cuEGLStreamConsumerAcquireFrame_v7000 +#define PFN_cuEGLStreamConsumerReleaseFrame PFN_cuEGLStreamConsumerReleaseFrame_v7000 +#define PFN_cuEGLStreamProducerConnect PFN_cuEGLStreamProducerConnect_v7000 +#define PFN_cuEGLStreamProducerDisconnect PFN_cuEGLStreamProducerDisconnect_v7000 +#define PFN_cuEGLStreamProducerPresentFrame PFN_cuEGLStreamProducerPresentFrame_v7000 +#define PFN_cuEGLStreamProducerReturnFrame PFN_cuEGLStreamProducerReturnFrame_v7000 +#define PFN_cuGraphicsResourceGetMappedEglFrame PFN_cuGraphicsResourceGetMappedEglFrame_v7000 +#define PFN_cuEventCreateFromEGLSync PFN_cuEventCreateFromEGLSync_v9000 + + +/** + * Type definitions for functions defined in cudaEGL.h + */ +typedef CUresult (CUDAAPI *PFN_cuGraphicsEGLRegisterImage_v7000)(CUgraphicsResource CUDAAPI *pCudaResource, EGLImageKHR image, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnectWithFlags_v8000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerAcquireFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource CUDAAPI *pCudaResource, CUstream CUDAAPI *pStream, unsigned int timeout); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerReleaseFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource pCudaResource, CUstream CUDAAPI *pStream); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, EGLint width, EGLint height); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerPresentFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 eglframe, CUstream CUDAAPI *pStream); +typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerReturnFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 CUDAAPI *eglframe, CUstream CUDAAPI *pStream); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedEglFrame_v7000)(CUeglFrame_v1 CUDAAPI *eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel); +typedef CUresult (CUDAAPI *PFN_cuEventCreateFromEGLSync_v9000)(CUevent CUDAAPI *phEvent, EGLSyncKHR eglSync, unsigned int flags); + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h 
b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h new file mode 100644 index 0000000000000000000000000000000000000000..1a9c70e881774c8f3cf8b6430e7aa53a98d74669 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h @@ -0,0 +1,608 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAGL_H +#define CUDAGL_H + +#include +#include + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif + +#ifdef CUDA_FORCE_API_VERSION +#error "CUDA_FORCE_API_VERSION is no longer supported." 
+#endif + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __CUDA_API_PER_THREAD_DEFAULT_STREAM + #define __CUDA_API_PTDS(api) api ## _ptds + #define __CUDA_API_PTSZ(api) api ## _ptsz +#else + #define __CUDA_API_PTDS(api) api + #define __CUDA_API_PTSZ(api) api +#endif + +#define cuGLCtxCreate cuGLCtxCreate_v2 +#define cuGLMapBufferObject __CUDA_API_PTDS(cuGLMapBufferObject_v2) +#define cuGLMapBufferObjectAsync __CUDA_API_PTSZ(cuGLMapBufferObjectAsync_v2) +#define cuGLGetDevices cuGLGetDevices_v2 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \file cudaGL.h + * \brief Header file for the OpenGL interoperability functions of the + * low-level CUDA driver application programming interface. + */ + +/** + * \defgroup CUDA_GL OpenGL Interoperability + * \ingroup CUDA_DRIVER + * + * ___MANBRIEF___ OpenGL interoperability functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the OpenGL interoperability functions of the + * low-level CUDA driver application programming interface. Note that mapping + * of OpenGL resources is performed with the graphics API agnostic, resource + * mapping interface described in \ref CUDA_GRAPHICS "Graphics Interoperability". + * + * @{ + */ + +#if defined(_WIN32) +#if !defined(WGL_NV_gpu_affinity) +typedef void* HGPUNV; +#endif +#endif /* _WIN32 */ + +/** + * \brief Registers an OpenGL buffer object + * + * Registers the buffer object specified by \p buffer for access by + * CUDA. A handle to the registered object is returned as \p + * pCudaResource. The register flags \p Flags specify the intended usage, + * as follows: + * + * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * \param pCudaResource - Pointer to the returned object handle + * \param buffer - name of buffer object to be registered + * \param Flags - Register flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \sa + * ::cuGraphicsUnregisterResource, + * ::cuGraphicsMapResources, + * ::cuGraphicsResourceGetMappedPointer, + * ::cudaGraphicsGLRegisterBuffer + */ +CUresult CUDAAPI cuGraphicsGLRegisterBuffer(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags); + +/** + * \brief Register an OpenGL texture or renderbuffer object + * + * Registers the texture or renderbuffer object specified by \p image for access by CUDA. + * A handle to the registered object is returned as \p pCudaResource. + * + * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D, + * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY, + * or ::GL_RENDERBUFFER. + * + * The register flags \p Flags specify the intended usage, as follows: + * + * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this + * resource will be used. 
It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA + * will not write to this resource. + * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * - ::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will + * bind this resource to a surface reference. + * - ::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform + * texture gather operations on this resource. + * + * The following image formats are supported. For brevity's sake, the list is abbreviated. + * For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats + * {GL_R8, GL_R16, GL_RG8, GL_RG16} : + * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY + * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I} + * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X + * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT} + * + * The following image classes are currently disallowed: + * - Textures with borders + * - Multisampled renderbuffers + * + * \param pCudaResource - Pointer to the returned object handle + * \param image - name of texture or renderbuffer object to be registered + * \param target - Identifies the type of object specified by \p image + * \param Flags - Register flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \sa + * ::cuGraphicsUnregisterResource, + * ::cuGraphicsMapResources, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsGLRegisterImage + */ +CUresult CUDAAPI cuGraphicsGLRegisterImage(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags); + +#ifdef _WIN32 +/** + * \brief Gets the CUDA device associated with hGpu + * + * Returns in \p *pDevice the CUDA device associated with a \p hGpu, if + * applicable. 
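/*
 * A minimal sketch, assuming a current OpenGL context, an initialized CUDA
 * context, and an existing GL buffer object `vbo`, of how the registration
 * functions documented above are typically combined with the generic
 * graphics-resource calls declared in cuda.h (cuGraphicsMapResources,
 * cuGraphicsResourceGetMappedPointer, cuGraphicsUnmapResources,
 * cuGraphicsUnregisterResource).  Error handling is reduced to one macro.
 */
#if 0   /* illustrative sketch only, not part of the API surface below */
#define CHECK(call) do { CUresult _e = (call); if (_e != CUDA_SUCCESS) return _e; } while (0)

/* Register a GL buffer, map it into the current CUDA context, and return its device pointer. */
static CUresult map_gl_buffer(GLuint vbo, CUgraphicsResource *res, CUdeviceptr *dptr, size_t *bytes)
{
    /* CU_GRAPHICS_REGISTER_FLAGS_NONE: CUDA may both read and write the buffer. */
    CHECK(cuGraphicsGLRegisterBuffer(res, vbo, CU_GRAPHICS_REGISTER_FLAGS_NONE));
    CHECK(cuGraphicsMapResources(1, res, 0));                    /* map on the default stream */
    CHECK(cuGraphicsResourceGetMappedPointer(dptr, bytes, *res));
    return CUDA_SUCCESS;
}

/* Undo the mapping and the registration once CUDA no longer needs the buffer. */
static CUresult release_gl_buffer(CUgraphicsResource *res)
{
    CHECK(cuGraphicsUnmapResources(1, res, 0));
    CHECK(cuGraphicsUnregisterResource(*res));
    return CUDA_SUCCESS;
}
#endif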
+ * + * \param pDevice - Device associated with hGpu + * \param hGpu - Handle to a GPU, as queried via ::WGL_NV_gpu_affinity() + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGLMapBufferObject, + * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + * ::cuGLUnregisterBufferObject, ::cuGLUnmapBufferObjectAsync, + * ::cuGLSetBufferObjectMapFlags, + * ::cudaWGLGetDevice + */ +CUresult CUDAAPI cuWGLGetDevice(CUdevice *pDevice, HGPUNV hGpu); +#endif /* _WIN32 */ + +/** + * CUDA devices corresponding to an OpenGL device + */ +typedef enum CUGLDeviceList_enum { + CU_GL_DEVICE_LIST_ALL = 0x01, /**< The CUDA devices for all GPUs used by the current OpenGL context */ + CU_GL_DEVICE_LIST_CURRENT_FRAME = 0x02, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */ + CU_GL_DEVICE_LIST_NEXT_FRAME = 0x03, /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */ +} CUGLDeviceList; + +/** + * \brief Gets the CUDA devices associated with the current OpenGL context + * + * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices + * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices + * at most cudaDeviceCount of the CUDA-compatible devices corresponding to + * the current OpenGL context. If any of the GPUs being used by the current OpenGL + * context are not CUDA capable then the call will return CUDA_ERROR_NO_DEVICE. + * + * The \p deviceList argument may be any of the following: + * - ::CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context. + * - ::CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to + * render the current frame (in SLI). + * - ::CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to + * render the next frame (in SLI). Note that this is a prediction, it can't be guaranteed that + * this is correct in all cases. + * + * \param pCudaDeviceCount - Returned number of CUDA devices. + * \param pCudaDevices - Returned CUDA devices. + * \param cudaDeviceCount - The size of the output device array pCudaDevices. + * \param deviceList - The set of devices to return. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NO_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_GRAPHICS_CONTEXT, + * ::CUDA_ERROR_OPERATING_SYSTEM + * + * \notefnerr + * + * \sa + * ::cuWGLGetDevice, + * ::cudaGLGetDevices + */ +CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); + +/** + * \defgroup CUDA_GL_DEPRECATED OpenGL Interoperability [DEPRECATED] + * + * ___MANBRIEF___ deprecated OpenGL interoperability functions of the low-level + * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes deprecated OpenGL interoperability functionality. + * + * @{ + */ + +/** Flags to map or unmap a resource */ +typedef enum CUGLmap_flags_enum { + CU_GL_MAP_RESOURCE_FLAGS_NONE = 0x00, + CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01, + CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02, +} CUGLmap_flags; + +/** + * \brief Create a CUDA context for interoperability with OpenGL + * + * \deprecated This function is deprecated as of Cuda 5.0. + * + * This function is deprecated and should no longer be used. 
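/*
 * A minimal sketch, assuming cuInit() has already been called and an OpenGL
 * context is current on this thread, of querying the CUDA devices behind
 * that context with ::cuGLGetDevices as documented above.  The array bound
 * of 8 and the use of <stdio.h> printf are illustrative assumptions.
 */
#if 0   /* illustrative sketch only */
static CUresult print_gl_devices(void)
{
    CUdevice devices[8];
    unsigned int count = 0;

    CUresult status = cuGLGetDevices(&count, devices, 8, CU_GL_DEVICE_LIST_ALL);
    if (status == CUDA_ERROR_NO_DEVICE)
        return status;          /* the GL context runs on no CUDA-capable GPU */
    if (status != CUDA_SUCCESS)
        return status;

    /* `count` reports all matching devices; at most 8 were written into the array. */
    for (unsigned int i = 0; i < count && i < 8; ++i) {
        char name[256];
        if (cuDeviceGetName(name, (int)sizeof(name), devices[i]) == CUDA_SUCCESS)
            printf("GL device %u -> CUDA device %d (%s)\n", i, (int)devices[i], name);
    }
    return CUDA_SUCCESS;
}
#endif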
It is + * no longer necessary to associate a CUDA context with an OpenGL + * context in order to achieve maximum interoperability performance. + * + * \param pCtx - Returned CUDA context + * \param Flags - Options for CUDA context creation + * \param device - Device on which to create the context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuCtxCreate, ::cuGLInit, ::cuGLMapBufferObject, + * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync, + * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags, + * ::cuWGLGetDevice + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device ); + +/** + * \brief Initializes OpenGL interoperability + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Initializes OpenGL interoperability. This function is deprecated + * and calling it is no longer required. It may fail if the needed + * OpenGL driver facilities are not available. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuGLMapBufferObject, + * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync, + * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags, + * ::cuWGLGetDevice + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLInit(void); + +/** + * \brief Registers an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Registers the buffer object specified by \p buffer for access by + * CUDA. This function must be called before CUDA can map the buffer + * object. There must be a valid OpenGL context bound to the current + * thread when this function is called, and the buffer name is + * resolved by that context. + * + * \param buffer - The name of the buffer object to register. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_ALREADY_MAPPED + * \notefnerr + * + * \sa ::cuGraphicsGLRegisterBuffer + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLRegisterBufferObject(GLuint buffer); + +/** + * \brief Maps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Maps the buffer object specified by \p buffer into the address space of the + * current CUDA context and returns in \p *dptr and \p *size the base pointer + * and size of the resulting mapping. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * All streams in the current CUDA context are synchronized with the + * current GL context. 
+ * + * \param dptr - Returned mapped base pointer + * \param size - Returned size of mapping + * \param buffer - The name of the buffer object to map + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_MAP_FAILED + * \notefnerr + * + * \sa ::cuGraphicsMapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr *dptr, size_t *size, GLuint buffer); + +/** + * \brief Unmaps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Unmaps the buffer object specified by \p buffer for access by CUDA. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * All streams in the current CUDA context are synchronized with the + * current GL context. + * + * \param buffer - Buffer object to unmap + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGraphicsUnmapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObject(GLuint buffer); + +/** + * \brief Unregister an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Unregisters the buffer object specified by \p buffer. This + * releases any resources associated with the registered buffer. + * After this call, the buffer may no longer be mapped for access by + * CUDA. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * \param buffer - Name of the buffer object to unregister + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGraphicsUnregisterResource + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLUnregisterBufferObject(GLuint buffer); + +/** + * \brief Set the map flags for an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Sets the map flags for the buffer object specified by \p buffer. + * + * Changes to \p Flags will take effect the next time \p buffer is mapped. + * The \p Flags argument may be any of the following: + * - ::CU_GL_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA kernels. This is the default value. + * - ::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA kernels which + * access this resource will not write to this resource. + * - ::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA kernels + * which access this resource will not read from this resource and will + * write over the entire contents of the resource, so none of the data + * previously stored in the resource will be preserved. + * + * If \p buffer has not been registered for use with CUDA, then + * ::CUDA_ERROR_INVALID_HANDLE is returned. If \p buffer is presently + * mapped for access by CUDA, then ::CUDA_ERROR_ALREADY_MAPPED is returned. 
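/*
 * A minimal sketch, assuming a current OpenGL context and an existing GL
 * buffer object `vbo`, of how the deprecated calls documented above were
 * combined before CUDA 3.0.  New code should prefer cuGraphicsGLRegisterBuffer
 * together with cuGraphicsMapResources instead of this path.
 */
#if 0   /* illustrative sketch only, deprecated API */
static CUresult legacy_zero_gl_buffer(GLuint vbo)
{
    CUdeviceptr dptr = 0;
    size_t bytes = 0;
    CUresult status;

    status = cuGLRegisterBufferObject(vbo);             /* one-time registration */
    if (status != CUDA_SUCCESS) return status;

    status = cuGLMapBufferObject(&dptr, &bytes, vbo);   /* map into the current context */
    if (status == CUDA_SUCCESS) {
        status = cuMemsetD8(dptr, 0, bytes);            /* CUDA writes the buffer */
        cuGLUnmapBufferObject(vbo);                     /* hand it back to OpenGL */
    }

    cuGLUnregisterBufferObject(vbo);
    return status;
}
#endif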
+ * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * \param buffer - Buffer object to unmap + * \param Flags - Map flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * \notefnerr + * + * \sa ::cuGraphicsResourceSetMapFlags + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLSetBufferObjectMapFlags(GLuint buffer, unsigned int Flags); + +/** + * \brief Maps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Maps the buffer object specified by \p buffer into the address space of the + * current CUDA context and returns in \p *dptr and \p *size the base pointer + * and size of the resulting mapping. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * Stream \p hStream in the current CUDA context is synchronized with + * the current GL context. + * + * \param dptr - Returned mapped base pointer + * \param size - Returned size of mapping + * \param buffer - The name of the buffer object to map + * \param hStream - Stream to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_MAP_FAILED + * \notefnerr + * + * \sa ::cuGraphicsMapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream); + +/** + * \brief Unmaps an OpenGL buffer object + * + * \deprecated This function is deprecated as of Cuda 3.0. + * + * Unmaps the buffer object specified by \p buffer for access by CUDA. + * + * There must be a valid OpenGL context bound to the current thread + * when this function is called. This must be the same context, or a + * member of the same shareGroup, as the context that was bound when + * the buffer was registered. + * + * Stream \p hStream in the current CUDA context is synchronized with + * the current GL context. 
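/*
 * A minimal sketch, assuming `vbo` was already registered with
 * ::cuGLRegisterBufferObject and `stream` is a valid stream in the current
 * context, of the deprecated map-flags and stream-synchronized variants
 * documented above.  The WRITE_DISCARD hint tells the driver that the
 * previous contents of the buffer need not be preserved across the next map.
 */
#if 0   /* illustrative sketch only, deprecated API */
static CUresult legacy_async_overwrite(GLuint vbo, CUstream stream)
{
    CUdeviceptr dptr = 0;
    size_t bytes = 0;
    CUresult status;

    /* New flags take effect the next time the buffer is mapped. */
    status = cuGLSetBufferObjectMapFlags(vbo, CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD);
    if (status != CUDA_SUCCESS) return status;

    status = cuGLMapBufferObjectAsync(&dptr, &bytes, vbo, stream);
    if (status != CUDA_SUCCESS) return status;

    status = cuMemsetD8Async(dptr, 0xFF, bytes, stream);   /* overwrite on `stream` */
    cuGLUnmapBufferObjectAsync(vbo, stream);                /* unmap on the same stream */
    return status;
}
#endif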
+ * + * \param buffer - Name of the buffer object to unmap + * \param hStream - Stream to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuGraphicsUnmapResources + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObjectAsync(GLuint buffer, CUstream hStream); + +/** @} */ /* END CUDA_GL_DEPRECATED */ +/** @} */ /* END CUDA_GL */ + + +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cuGLCtxCreate + #undef cuGLMapBufferObject + #undef cuGLMapBufferObjectAsync + #undef cuGLGetDevices + + CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); + CUresult CUDAAPI cuGLMapBufferObject_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer); + CUresult CUDAAPI cuGLMapBufferObjectAsync_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream); + CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device ); + CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer); + CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream); +#endif /* __CUDA_API_VERSION_INTERNAL */ + +#ifdef __cplusplus +}; +#endif + +#undef __CUDA_DEPRECATED + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..81f0d5349e435159647af9af379d1e8e8441221c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h @@ -0,0 +1,123 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAGLTYPEDEFS_H +#define CUDAGLTYPEDEFS_H + +// Dependent includes for cudagl.h +#include + +#include + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz +#else + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version +#endif + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaGL.h + */ +#define PFN_cuGraphicsGLRegisterBuffer PFN_cuGraphicsGLRegisterBuffer_v3000 +#define PFN_cuGraphicsGLRegisterImage PFN_cuGraphicsGLRegisterImage_v3000 +#define PFN_cuWGLGetDevice PFN_cuWGLGetDevice_v2020 +#define PFN_cuGLGetDevices PFN_cuGLGetDevices_v6050 +#define PFN_cuGLCtxCreate PFN_cuGLCtxCreate_v3020 +#define PFN_cuGLInit PFN_cuGLInit_v2000 +#define PFN_cuGLRegisterBufferObject PFN_cuGLRegisterBufferObject_v2000 +#define PFN_cuGLMapBufferObject __API_TYPEDEF_PTDS(PFN_cuGLMapBufferObject, 3020, 7000) +#define PFN_cuGLUnmapBufferObject PFN_cuGLUnmapBufferObject_v2000 +#define PFN_cuGLUnregisterBufferObject PFN_cuGLUnregisterBufferObject_v2000 +#define PFN_cuGLSetBufferObjectMapFlags PFN_cuGLSetBufferObjectMapFlags_v2030 +#define PFN_cuGLMapBufferObjectAsync __API_TYPEDEF_PTSZ(PFN_cuGLMapBufferObjectAsync, 3020, 7000) +#define PFN_cuGLUnmapBufferObjectAsync PFN_cuGLUnmapBufferObjectAsync_v2030 + + +/** + * Type definitions for functions defined in cudaGL.h + */ +typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterBuffer_v3000)(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterImage_v3000)(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags); +#ifdef _WIN32 +typedef CUresult (CUDAAPI *PFN_cuWGLGetDevice_v2020)(CUdevice_v1 *pDevice, HGPUNV hGpu); +#endif +typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v6050)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); +typedef CUresult (CUDAAPI 
*PFN_cuGLCtxCreate_v3020)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device); +typedef CUresult (CUDAAPI *PFN_cuGLInit_v2000)(void); +typedef CUresult (CUDAAPI *PFN_cuGLRegisterBufferObject_v2000)(GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v7000_ptds)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObject_v2000)(GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLUnregisterBufferObject_v2000)(GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLSetBufferObjectMapFlags_v2030)(GLuint buffer, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v7000_ptsz)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObjectAsync_v2030)(GLuint buffer, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream); + +/* + * Type definitions for older versioned functions in cuda.h + */ +#if defined(__CUDA_API_VERSION_INTERNAL) +typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v4010)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v2000)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer); +typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v2030)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v2000)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device); +#endif + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..bea7df4573aff2fa5b0d0029ce9d40a7ebe2de46 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h @@ -0,0 +1,78 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAPROFILERTYPEDEFS_H +#define CUDAPROFILERTYPEDEFS_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaProfiler.h + */ +#define PFN_cuProfilerInitialize PFN_cuProfilerInitialize_v4000 +#define PFN_cuProfilerStart PFN_cuProfilerStart_v4000 +#define PFN_cuProfilerStop PFN_cuProfilerStop_v4000 + + +/** + * Type definitions for functions defined in cudaProfiler.h + */ +typedef CUresult (CUDAAPI *PFN_cuProfilerInitialize_v4000)(const char *configFile, const char *outputFile, CUoutput_mode outputMode); +typedef CUresult (CUDAAPI *PFN_cuProfilerStart_v4000)(void); +typedef CUresult (CUDAAPI *PFN_cuProfilerStop_v4000)(void); + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..c3c7117a9044d428f0ced8ade419bcf520c72732 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h @@ -0,0 +1,1029 @@ +/* + * Copyright 2020-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDATYPEDEFS_H +#define CUDATYPEDEFS_H + +#include + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz +#else + #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version + #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version +#endif + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cuda.h + */ +#define PFN_cuGetErrorString PFN_cuGetErrorString_v6000 +#define PFN_cuGetErrorName PFN_cuGetErrorName_v6000 +#define PFN_cuInit PFN_cuInit_v2000 +#define PFN_cuDriverGetVersion PFN_cuDriverGetVersion_v2020 +#define PFN_cuDeviceGet PFN_cuDeviceGet_v2000 +#define PFN_cuDeviceGetCount PFN_cuDeviceGetCount_v2000 +#define PFN_cuDeviceGetName PFN_cuDeviceGetName_v2000 +#define PFN_cuDeviceGetUuid PFN_cuDeviceGetUuid_v11040 +#define PFN_cuDeviceGetLuid PFN_cuDeviceGetLuid_v10000 +#define PFN_cuDeviceTotalMem PFN_cuDeviceTotalMem_v3020 +#define PFN_cuDeviceGetTexture1DLinearMaxWidth PFN_cuDeviceGetTexture1DLinearMaxWidth_v11010 +#define PFN_cuDeviceGetAttribute PFN_cuDeviceGetAttribute_v2000 +#define PFN_cuDeviceGetNvSciSyncAttributes PFN_cuDeviceGetNvSciSyncAttributes_v10020 +#define PFN_cuDeviceSetMemPool PFN_cuDeviceSetMemPool_v11020 +#define PFN_cuDeviceGetMemPool PFN_cuDeviceGetMemPool_v11020 +#define PFN_cuDeviceGetDefaultMemPool PFN_cuDeviceGetDefaultMemPool_v11020 +#define PFN_cuDeviceGetProperties PFN_cuDeviceGetProperties_v2000 +#define PFN_cuDeviceComputeCapability PFN_cuDeviceComputeCapability_v2000 +#define PFN_cuDevicePrimaryCtxRetain PFN_cuDevicePrimaryCtxRetain_v7000 +#define PFN_cuDevicePrimaryCtxRelease PFN_cuDevicePrimaryCtxRelease_v11000 +#define PFN_cuDevicePrimaryCtxSetFlags PFN_cuDevicePrimaryCtxSetFlags_v11000 +#define 
PFN_cuDevicePrimaryCtxGetState PFN_cuDevicePrimaryCtxGetState_v7000 +#define PFN_cuDevicePrimaryCtxReset PFN_cuDevicePrimaryCtxReset_v11000 +#define PFN_cuDeviceGetExecAffinitySupport PFN_cuDeviceGetExecAffinitySupport_v11040 +#define PFN_cuCtxCreate PFN_cuCtxCreate_v11040 +#define PFN_cuCtxGetId PFN_cuCtxGetId_v12000 +#define PFN_cuCtxDestroy PFN_cuCtxDestroy_v4000 +#define PFN_cuCtxPushCurrent PFN_cuCtxPushCurrent_v4000 +#define PFN_cuCtxPopCurrent PFN_cuCtxPopCurrent_v4000 +#define PFN_cuCtxSetCurrent PFN_cuCtxSetCurrent_v4000 +#define PFN_cuCtxGetCurrent PFN_cuCtxGetCurrent_v4000 +#define PFN_cuCtxGetDevice PFN_cuCtxGetDevice_v2000 +#define PFN_cuCtxGetFlags PFN_cuCtxGetFlags_v7000 +#define PFN_cuCtxSetFlags PFN_cuCtxSetFlags_v12010 +#define PFN_cuCtxSynchronize PFN_cuCtxSynchronize_v2000 +#define PFN_cuCtxSetLimit PFN_cuCtxSetLimit_v3010 +#define PFN_cuCtxGetLimit PFN_cuCtxGetLimit_v3010 +#define PFN_cuCtxGetCacheConfig PFN_cuCtxGetCacheConfig_v3020 +#define PFN_cuCtxSetCacheConfig PFN_cuCtxSetCacheConfig_v3020 +#define PFN_cuCtxGetSharedMemConfig PFN_cuCtxGetSharedMemConfig_v4020 +#define PFN_cuCtxSetSharedMemConfig PFN_cuCtxSetSharedMemConfig_v4020 +#define PFN_cuCtxGetApiVersion PFN_cuCtxGetApiVersion_v3020 +#define PFN_cuCtxGetStreamPriorityRange PFN_cuCtxGetStreamPriorityRange_v5050 +#define PFN_cuCtxResetPersistingL2Cache PFN_cuCtxResetPersistingL2Cache_v11000 +#define PFN_cuCtxAttach PFN_cuCtxAttach_v2000 +#define PFN_cuCtxDetach PFN_cuCtxDetach_v2000 +#define PFN_cuCtxGetExecAffinity PFN_cuCtxGetExecAffinity_v11040 +#define PFN_cuModuleLoad PFN_cuModuleLoad_v2000 +#define PFN_cuModuleLoadData PFN_cuModuleLoadData_v2000 +#define PFN_cuModuleLoadDataEx PFN_cuModuleLoadDataEx_v2010 +#define PFN_cuModuleLoadFatBinary PFN_cuModuleLoadFatBinary_v2000 +#define PFN_cuModuleUnload PFN_cuModuleUnload_v2000 +#define PFN_cuModuleGetFunction PFN_cuModuleGetFunction_v2000 +#define PFN_cuModuleGetGlobal PFN_cuModuleGetGlobal_v3020 +#define PFN_cuModuleGetTexRef PFN_cuModuleGetTexRef_v2000 +#define PFN_cuModuleGetSurfRef PFN_cuModuleGetSurfRef_v3000 +#define PFN_cuLinkCreate PFN_cuLinkCreate_v6050 +#define PFN_cuLinkAddData PFN_cuLinkAddData_v6050 +#define PFN_cuLinkAddFile PFN_cuLinkAddFile_v6050 +#define PFN_cuLinkComplete PFN_cuLinkComplete_v5050 +#define PFN_cuLinkDestroy PFN_cuLinkDestroy_v5050 +#define PFN_cuMemGetInfo PFN_cuMemGetInfo_v3020 +#define PFN_cuMemAlloc PFN_cuMemAlloc_v3020 +#define PFN_cuMemAllocPitch PFN_cuMemAllocPitch_v3020 +#define PFN_cuMemFree PFN_cuMemFree_v3020 +#define PFN_cuMemGetAddressRange PFN_cuMemGetAddressRange_v3020 +#define PFN_cuMemAllocHost PFN_cuMemAllocHost_v3020 +#define PFN_cuMemFreeHost PFN_cuMemFreeHost_v2000 +#define PFN_cuMemHostAlloc PFN_cuMemHostAlloc_v2020 +#define PFN_cuMemHostGetDevicePointer PFN_cuMemHostGetDevicePointer_v3020 +#define PFN_cuMemHostGetFlags PFN_cuMemHostGetFlags_v2030 +#define PFN_cuMemAllocManaged PFN_cuMemAllocManaged_v6000 +#define PFN_cuDeviceGetByPCIBusId PFN_cuDeviceGetByPCIBusId_v4010 +#define PFN_cuDeviceGetPCIBusId PFN_cuDeviceGetPCIBusId_v4010 +#define PFN_cuIpcGetEventHandle PFN_cuIpcGetEventHandle_v4010 +#define PFN_cuIpcOpenEventHandle PFN_cuIpcOpenEventHandle_v4010 +#define PFN_cuIpcGetMemHandle PFN_cuIpcGetMemHandle_v4010 +#define PFN_cuIpcOpenMemHandle PFN_cuIpcOpenMemHandle_v11000 +#define PFN_cuIpcCloseMemHandle PFN_cuIpcCloseMemHandle_v4010 +#define PFN_cuMemHostRegister PFN_cuMemHostRegister_v6050 +#define PFN_cuMemHostUnregister PFN_cuMemHostUnregister_v4000 +#define PFN_cuMemcpy 
__API_TYPEDEF_PTDS(PFN_cuMemcpy, 4000, 7000) +#define PFN_cuMemcpyPeer __API_TYPEDEF_PTDS(PFN_cuMemcpyPeer, 4000, 7000) +#define PFN_cuMemcpyHtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyHtoD, 3020, 7000) +#define PFN_cuMemcpyDtoH __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoH, 3020, 7000) +#define PFN_cuMemcpyDtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoD, 3020, 7000) +#define PFN_cuMemcpyDtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoA, 3020, 7000) +#define PFN_cuMemcpyAtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoD, 3020, 7000) +#define PFN_cuMemcpyHtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyHtoA, 3020, 7000) +#define PFN_cuMemcpyAtoH __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoH, 3020, 7000) +#define PFN_cuMemcpyAtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoA, 3020, 7000) +#define PFN_cuMemcpy2D __API_TYPEDEF_PTDS(PFN_cuMemcpy2D, 3020, 7000) +#define PFN_cuMemcpy2DUnaligned __API_TYPEDEF_PTDS(PFN_cuMemcpy2DUnaligned, 3020, 7000) +#define PFN_cuMemcpy3D __API_TYPEDEF_PTDS(PFN_cuMemcpy3D, 3020, 7000) +#define PFN_cuMemcpy3DPeer __API_TYPEDEF_PTDS(PFN_cuMemcpy3DPeer, 4000, 7000) +#define PFN_cuMemcpyAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyAsync, 4000, 7000) +#define PFN_cuMemcpyPeerAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyPeerAsync, 4000, 7000) +#define PFN_cuMemcpyHtoDAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyHtoDAsync, 3020, 7000) +#define PFN_cuMemcpyDtoHAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyDtoHAsync, 3020, 7000) +#define PFN_cuMemcpyDtoDAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyDtoDAsync, 3020, 7000) +#define PFN_cuMemcpyHtoAAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyHtoAAsync, 3020, 7000) +#define PFN_cuMemcpyAtoHAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyAtoHAsync, 3020, 7000) +#define PFN_cuMemcpy2DAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy2DAsync, 3020, 7000) +#define PFN_cuMemcpy3DAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy3DAsync, 3020, 7000) +#define PFN_cuMemcpy3DPeerAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy3DPeerAsync, 4000, 7000) +#define PFN_cuMemsetD8 __API_TYPEDEF_PTDS(PFN_cuMemsetD8, 3020, 7000) +#define PFN_cuMemsetD16 __API_TYPEDEF_PTDS(PFN_cuMemsetD16, 3020, 7000) +#define PFN_cuMemsetD32 __API_TYPEDEF_PTDS(PFN_cuMemsetD32, 3020, 7000) +#define PFN_cuMemsetD2D8 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D8, 3020, 7000) +#define PFN_cuMemsetD2D16 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D16, 3020, 7000) +#define PFN_cuMemsetD2D32 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D32, 3020, 7000) +#define PFN_cuMemsetD8Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD8Async, 3020, 7000) +#define PFN_cuMemsetD16Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD16Async, 3020, 7000) +#define PFN_cuMemsetD32Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD32Async, 3020, 7000) +#define PFN_cuMemsetD2D8Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D8Async, 3020, 7000) +#define PFN_cuMemsetD2D16Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D16Async, 3020, 7000) +#define PFN_cuMemsetD2D32Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D32Async, 3020, 7000) +#define PFN_cuArrayCreate PFN_cuArrayCreate_v3020 +#define PFN_cuArrayGetDescriptor PFN_cuArrayGetDescriptor_v3020 +#define PFN_cuArrayGetSparseProperties PFN_cuArrayGetSparseProperties_v11010 +#define PFN_cuMipmappedArrayGetSparseProperties PFN_cuMipmappedArrayGetSparseProperties_v11010 +#define PFN_cuArrayGetMemoryRequirements PFN_cuArrayGetMemoryRequirements_v11060 +#define PFN_cuMipmappedArrayGetMemoryRequirements PFN_cuMipmappedArrayGetMemoryRequirements_v11060 +#define PFN_cuArrayGetPlane PFN_cuArrayGetPlane_v11020 +#define PFN_cuArrayDestroy PFN_cuArrayDestroy_v2000 +#define PFN_cuArray3DCreate PFN_cuArray3DCreate_v3020 +#define PFN_cuArray3DGetDescriptor PFN_cuArray3DGetDescriptor_v3020 +#define 
PFN_cuMipmappedArrayCreate PFN_cuMipmappedArrayCreate_v5000 +#define PFN_cuMipmappedArrayGetLevel PFN_cuMipmappedArrayGetLevel_v5000 +#define PFN_cuMipmappedArrayDestroy PFN_cuMipmappedArrayDestroy_v5000 +#define PFN_cuMemAddressReserve PFN_cuMemAddressReserve_v10020 +#define PFN_cuMemAddressFree PFN_cuMemAddressFree_v10020 +#define PFN_cuMemCreate PFN_cuMemCreate_v10020 +#define PFN_cuMemRelease PFN_cuMemRelease_v10020 +#define PFN_cuMemMap PFN_cuMemMap_v10020 +#define PFN_cuMemMapArrayAsync __API_TYPEDEF_PTSZ(PFN_cuMemMapArrayAsync, 11010, 11010) +#define PFN_cuMemUnmap PFN_cuMemUnmap_v10020 +#define PFN_cuMemSetAccess PFN_cuMemSetAccess_v10020 +#define PFN_cuMemGetAccess PFN_cuMemGetAccess_v10020 +#define PFN_cuMemExportToShareableHandle PFN_cuMemExportToShareableHandle_v10020 +#define PFN_cuMemImportFromShareableHandle PFN_cuMemImportFromShareableHandle_v10020 +#define PFN_cuMemGetAllocationGranularity PFN_cuMemGetAllocationGranularity_v10020 +#define PFN_cuMemGetAllocationPropertiesFromHandle PFN_cuMemGetAllocationPropertiesFromHandle_v10020 +#define PFN_cuMemRetainAllocationHandle PFN_cuMemRetainAllocationHandle_v11000 +#define PFN_cuMemFreeAsync __API_TYPEDEF_PTSZ(PFN_cuMemFreeAsync, 11020, 11020) +#define PFN_cuMemAllocAsync __API_TYPEDEF_PTSZ(PFN_cuMemAllocAsync, 11020, 11020) +#define PFN_cuMemPoolTrimTo PFN_cuMemPoolTrimTo_v11020 +#define PFN_cuMemPoolSetAttribute PFN_cuMemPoolSetAttribute_v11020 +#define PFN_cuMemPoolGetAttribute PFN_cuMemPoolGetAttribute_v11020 +#define PFN_cuMemPoolSetAccess PFN_cuMemPoolSetAccess_v11020 +#define PFN_cuMemPoolGetAccess PFN_cuMemPoolGetAccess_v11020 +#define PFN_cuMemPoolCreate PFN_cuMemPoolCreate_v11020 +#define PFN_cuMemPoolDestroy PFN_cuMemPoolDestroy_v11020 +#define PFN_cuMemAllocFromPoolAsync __API_TYPEDEF_PTSZ(PFN_cuMemAllocFromPoolAsync, 11020, 11020) +#define PFN_cuMemPoolExportToShareableHandle PFN_cuMemPoolExportToShareableHandle_v11020 +#define PFN_cuMemPoolImportFromShareableHandle PFN_cuMemPoolImportFromShareableHandle_v11020 +#define PFN_cuMemPoolExportPointer PFN_cuMemPoolExportPointer_v11020 +#define PFN_cuMemPoolImportPointer PFN_cuMemPoolImportPointer_v11020 +#define PFN_cuPointerGetAttribute PFN_cuPointerGetAttribute_v4000 +#define PFN_cuMemPrefetchAsync __API_TYPEDEF_PTSZ(PFN_cuMemPrefetchAsync, 8000, 8000) +#define PFN_cuMemAdvise PFN_cuMemAdvise_v8000 +#define PFN_cuMemRangeGetAttribute PFN_cuMemRangeGetAttribute_v8000 +#define PFN_cuMemRangeGetAttributes PFN_cuMemRangeGetAttributes_v8000 +#define PFN_cuMulticastCreate PFN_cuMulticastCreate_v12010 +#define PFN_cuMulticastAddDevice PFN_cuMulticastAddDevice_v12010 +#define PFN_cuMulticastBindMem PFN_cuMulticastBindMem_v12010 +#define PFN_cuMulticastBindAddr PFN_cuMulticastBindAddr_v12010 +#define PFN_cuMulticastUnbind PFN_cuMulticastUnbind_v12010 +#define PFN_cuMulticastGetGranularity PFN_cuMulticastGetGranularity_v12010 +#define PFN_cuPointerSetAttribute PFN_cuPointerSetAttribute_v6000 +#define PFN_cuPointerGetAttributes PFN_cuPointerGetAttributes_v7000 +#define PFN_cuStreamCreate PFN_cuStreamCreate_v2000 +#define PFN_cuStreamCreateWithPriority PFN_cuStreamCreateWithPriority_v5050 +#define PFN_cuStreamGetId __API_TYPEDEF_PTSZ(PFN_cuStreamGetId_v12000, 12000, 12000) +#define PFN_cuStreamGetPriority __API_TYPEDEF_PTSZ(PFN_cuStreamGetPriority, 5050, 7000) +#define PFN_cuStreamGetFlags __API_TYPEDEF_PTSZ(PFN_cuStreamGetFlags, 5050, 7000) +#define PFN_cuStreamGetCtx __API_TYPEDEF_PTSZ(PFN_cuStreamGetCtx, 9020, 9020) +#define PFN_cuStreamWaitEvent 
__API_TYPEDEF_PTSZ(PFN_cuStreamWaitEvent, 3020, 7000) +#define PFN_cuStreamAddCallback __API_TYPEDEF_PTSZ(PFN_cuStreamAddCallback, 5000, 7000) +#define PFN_cuStreamBeginCapture __API_TYPEDEF_PTSZ(PFN_cuStreamBeginCapture, 10010, 10010) +#define PFN_cuThreadExchangeStreamCaptureMode PFN_cuThreadExchangeStreamCaptureMode_v10010 +#define PFN_cuStreamEndCapture __API_TYPEDEF_PTSZ(PFN_cuStreamEndCapture, 10000, 10000) +#define PFN_cuStreamIsCapturing __API_TYPEDEF_PTSZ(PFN_cuStreamIsCapturing, 10000, 10000) +#define PFN_cuStreamGetCaptureInfo __API_TYPEDEF_PTSZ(PFN_cuStreamGetCaptureInfo, 10010, 10010) +#define PFN_cuStreamGetCaptureInfo_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamGetCaptureInfo, 11030, 11030) +#define PFN_cuStreamUpdateCaptureDependencies __API_TYPEDEF_PTSZ(PFN_cuStreamUpdateCaptureDependencies, 11030, 11030) +#define PFN_cuStreamAttachMemAsync __API_TYPEDEF_PTSZ(PFN_cuStreamAttachMemAsync, 6000, 7000) +#define PFN_cuStreamQuery __API_TYPEDEF_PTSZ(PFN_cuStreamQuery, 2000, 7000) +#define PFN_cuStreamSynchronize __API_TYPEDEF_PTSZ(PFN_cuStreamSynchronize, 2000, 7000) +#define PFN_cuStreamDestroy PFN_cuStreamDestroy_v4000 +#define PFN_cuStreamCopyAttributes __API_TYPEDEF_PTSZ(PFN_cuStreamCopyAttributes, 11000, 11000) +#define PFN_cuStreamGetAttribute __API_TYPEDEF_PTSZ(PFN_cuStreamGetAttribute, 11000, 11000) +#define PFN_cuStreamSetAttribute __API_TYPEDEF_PTSZ(PFN_cuStreamSetAttribute, 11000, 11000) +#define PFN_cuEventCreate PFN_cuEventCreate_v2000 +#define PFN_cuEventRecord __API_TYPEDEF_PTSZ(PFN_cuEventRecord, 2000, 7000) +#define PFN_cuEventRecordWithFlags __API_TYPEDEF_PTSZ(PFN_cuEventRecordWithFlags, 11010, 11010) +#define PFN_cuEventQuery PFN_cuEventQuery_v2000 +#define PFN_cuEventSynchronize PFN_cuEventSynchronize_v2000 +#define PFN_cuEventDestroy PFN_cuEventDestroy_v4000 +#define PFN_cuEventElapsedTime PFN_cuEventElapsedTime_v2000 +#define PFN_cuImportExternalMemory PFN_cuImportExternalMemory_v10000 +#define PFN_cuExternalMemoryGetMappedBuffer PFN_cuExternalMemoryGetMappedBuffer_v10000 +#define PFN_cuExternalMemoryGetMappedMipmappedArray PFN_cuExternalMemoryGetMappedMipmappedArray_v10000 +#define PFN_cuDestroyExternalMemory PFN_cuDestroyExternalMemory_v10000 +#define PFN_cuImportExternalSemaphore PFN_cuImportExternalSemaphore_v10000 +#define PFN_cuSignalExternalSemaphoresAsync __API_TYPEDEF_PTSZ(PFN_cuSignalExternalSemaphoresAsync, 10000, 10000) +#define PFN_cuWaitExternalSemaphoresAsync __API_TYPEDEF_PTSZ(PFN_cuWaitExternalSemaphoresAsync, 10000, 10000) +#define PFN_cuDestroyExternalSemaphore PFN_cuDestroyExternalSemaphore_v10000 +#define PFN_cuStreamWaitValue32 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue32, 8000, 8000) +#define PFN_cuStreamWaitValue64 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue64, 9000, 9000) +#define PFN_cuStreamWriteValue32 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue32, 8000, 8000) +#define PFN_cuStreamWriteValue64 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue64, 9000, 9000) +#define PFN_cuStreamBatchMemOp __API_TYPEDEF_PTSZ(PFN_cuStreamBatchMemOp, 8000, 8000) +#define PFN_cuStreamWaitValue32_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue32, 11070, 11070) +#define PFN_cuStreamWaitValue64_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue64, 11070, 11070) +#define PFN_cuStreamWriteValue32_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue32, 11070, 11070) +#define PFN_cuStreamWriteValue64_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue64, 11070, 11070) +#define PFN_cuStreamBatchMemOp_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamBatchMemOp, 11070, 11070) +#define PFN_cuFuncGetAttribute 
PFN_cuFuncGetAttribute_v2020 +#define PFN_cuFuncSetAttribute PFN_cuFuncSetAttribute_v9000 +#define PFN_cuFuncSetCacheConfig PFN_cuFuncSetCacheConfig_v3000 +#define PFN_cuFuncSetSharedMemConfig PFN_cuFuncSetSharedMemConfig_v4020 +#define PFN_cuLaunchKernel __API_TYPEDEF_PTSZ(PFN_cuLaunchKernel, 4000, 7000) +#define PFN_cuLaunchKernelEx __API_TYPEDEF_PTSZ(PFN_cuLaunchKernelEx, 11060, 11060) +#define PFN_cuLaunchCooperativeKernel __API_TYPEDEF_PTSZ(PFN_cuLaunchCooperativeKernel, 9000, 9000) +#define PFN_cuLaunchCooperativeKernelMultiDevice PFN_cuLaunchCooperativeKernelMultiDevice_v9000 +#define PFN_cuLaunchHostFunc __API_TYPEDEF_PTSZ(PFN_cuLaunchHostFunc, 10000, 10000) +#define PFN_cuFuncSetBlockShape PFN_cuFuncSetBlockShape_v2000 +#define PFN_cuFuncSetSharedSize PFN_cuFuncSetSharedSize_v2000 +#define PFN_cuParamSetSize PFN_cuParamSetSize_v2000 +#define PFN_cuParamSeti PFN_cuParamSeti_v2000 +#define PFN_cuParamSetf PFN_cuParamSetf_v2000 +#define PFN_cuParamSetv PFN_cuParamSetv_v2000 +#define PFN_cuLaunch PFN_cuLaunch_v2000 +#define PFN_cuLaunchGrid PFN_cuLaunchGrid_v2000 +#define PFN_cuLaunchGridAsync PFN_cuLaunchGridAsync_v2000 +#define PFN_cuParamSetTexRef PFN_cuParamSetTexRef_v2000 +#define PFN_cuGraphCreate PFN_cuGraphCreate_v10000 +#define PFN_cuGraphAddKernelNode PFN_cuGraphAddKernelNode_v12000 +#define PFN_cuGraphKernelNodeGetParams PFN_cuGraphKernelNodeGetParams_v12000 +#define PFN_cuGraphKernelNodeSetParams PFN_cuGraphKernelNodeSetParams_v12000 +#define PFN_cuGraphAddMemcpyNode PFN_cuGraphAddMemcpyNode_v10000 +#define PFN_cuGraphMemcpyNodeGetParams PFN_cuGraphMemcpyNodeGetParams_v10000 +#define PFN_cuGraphMemcpyNodeSetParams PFN_cuGraphMemcpyNodeSetParams_v10000 +#define PFN_cuGraphAddMemsetNode PFN_cuGraphAddMemsetNode_v10000 +#define PFN_cuGraphMemsetNodeGetParams PFN_cuGraphMemsetNodeGetParams_v10000 +#define PFN_cuGraphMemsetNodeSetParams PFN_cuGraphMemsetNodeSetParams_v10000 +#define PFN_cuGraphAddHostNode PFN_cuGraphAddHostNode_v10000 +#define PFN_cuGraphHostNodeGetParams PFN_cuGraphHostNodeGetParams_v10000 +#define PFN_cuGraphHostNodeSetParams PFN_cuGraphHostNodeSetParams_v10000 +#define PFN_cuGraphAddChildGraphNode PFN_cuGraphAddChildGraphNode_v10000 +#define PFN_cuGraphChildGraphNodeGetGraph PFN_cuGraphChildGraphNodeGetGraph_v10000 +#define PFN_cuGraphAddEmptyNode PFN_cuGraphAddEmptyNode_v10000 +#define PFN_cuGraphAddEventRecordNode PFN_cuGraphAddEventRecordNode_v11010 +#define PFN_cuGraphEventRecordNodeGetEvent PFN_cuGraphEventRecordNodeGetEvent_v11010 +#define PFN_cuGraphEventRecordNodeSetEvent PFN_cuGraphEventRecordNodeSetEvent_v11010 +#define PFN_cuGraphAddEventWaitNode PFN_cuGraphAddEventWaitNode_v11010 +#define PFN_cuGraphEventWaitNodeGetEvent PFN_cuGraphEventWaitNodeGetEvent_v11010 +#define PFN_cuGraphEventWaitNodeSetEvent PFN_cuGraphEventWaitNodeSetEvent_v11010 +#define PFN_cuGraphAddExternalSemaphoresSignalNode PFN_cuGraphAddExternalSemaphoresSignalNode_v11020 +#define PFN_cuGraphExternalSemaphoresSignalNodeGetParams PFN_cuGraphExternalSemaphoresSignalNodeGetParams_v11020 +#define PFN_cuGraphExternalSemaphoresSignalNodeSetParams PFN_cuGraphExternalSemaphoresSignalNodeSetParams_v11020 +#define PFN_cuGraphAddExternalSemaphoresWaitNode PFN_cuGraphAddExternalSemaphoresWaitNode_v11020 +#define PFN_cuGraphExternalSemaphoresWaitNodeGetParams PFN_cuGraphExternalSemaphoresWaitNodeGetParams_v11020 +#define PFN_cuGraphExternalSemaphoresWaitNodeSetParams PFN_cuGraphExternalSemaphoresWaitNodeSetParams_v11020 +#define PFN_cuGraphAddBatchMemOpNode 
PFN_cuGraphAddBatchMemOpNode_v11070 +#define PFN_cuGraphBatchMemOpNodeGetParams PFN_cuGraphBatchMemOpNodeGetParams_v11070 +#define PFN_cuGraphBatchMemOpNodeSetParams PFN_cuGraphBatchMemOpNodeSetParams _v11070 +#define PFN_cuGraphExecBatchMemOpNodeSetParams PFN_cuGraphExecBatchMemOpNodeSetParams_v11070 +#define PFN_cuGraphClone PFN_cuGraphClone_v10000 +#define PFN_cuGraphNodeFindInClone PFN_cuGraphNodeFindInClone_v10000 +#define PFN_cuGraphNodeGetType PFN_cuGraphNodeGetType_v10000 +#define PFN_cuGraphGetNodes PFN_cuGraphGetNodes_v10000 +#define PFN_cuGraphGetRootNodes PFN_cuGraphGetRootNodes_v10000 +#define PFN_cuGraphGetEdges PFN_cuGraphGetEdges_v10000 +#define PFN_cuGraphNodeGetDependencies PFN_cuGraphNodeGetDependencies_v10000 +#define PFN_cuGraphNodeGetDependentNodes PFN_cuGraphNodeGetDependentNodes_v10000 +#define PFN_cuGraphAddDependencies PFN_cuGraphAddDependencies_v10000 +#define PFN_cuGraphRemoveDependencies PFN_cuGraphRemoveDependencies_v10000 +#define PFN_cuGraphDestroyNode PFN_cuGraphDestroyNode_v10000 + +#define PFN_cuGraphInstantiate PFN_cuGraphInstantiateWithFlags_v11040 + +#define PFN_cuGraphInstantiateWithFlags PFN_cuGraphInstantiateWithFlags_v11040 +#define PFN_cuGraphInstantiateWithParams __API_TYPEDEF_PTSZ(PFN_cuGraphInstantiateWithParams, 12000, 12000) +#define PFN_cuGraphExecGetFlags PFN_cuGraphExecGetFlags_v12000 +#define PFN_cuGraphExecKernelNodeSetParams PFN_cuGraphExecKernelNodeSetParams_v12000 +#define PFN_cuGraphExecMemcpyNodeSetParams PFN_cuGraphExecMemcpyNodeSetParams_v10020 +#define PFN_cuGraphExecMemsetNodeSetParams PFN_cuGraphExecMemsetNodeSetParams_v10020 +#define PFN_cuGraphExecHostNodeSetParams PFN_cuGraphExecHostNodeSetParams_v10020 +#define PFN_cuGraphExecChildGraphNodeSetParams PFN_cuGraphExecChildGraphNodeSetParams_v11010 +#define PFN_cuGraphExecEventRecordNodeSetEvent PFN_cuGraphExecEventRecordNodeSetEvent_v11010 +#define PFN_cuGraphExecEventWaitNodeSetEvent PFN_cuGraphExecEventWaitNodeSetEvent_v11010 +#define PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams_v11020 +#define PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams_v11020 +#define PFN_cuGraphUpload __API_TYPEDEF_PTSZ(PFN_cuGraphUpload, 11010, 11010) +#define PFN_cuGraphLaunch __API_TYPEDEF_PTSZ(PFN_cuGraphLaunch, 10000, 10000) +#define PFN_cuGraphExecDestroy PFN_cuGraphExecDestroy_v10000 +#define PFN_cuGraphDestroy PFN_cuGraphDestroy_v10000 +#define PFN_cuGraphExecUpdate PFN_cuGraphExecUpdate_v12000 +#define PFN_cuGraphKernelNodeCopyAttributes PFN_cuGraphKernelNodeCopyAttributes_v11000 +#define PFN_cuGraphKernelNodeGetAttribute PFN_cuGraphKernelNodeGetAttribute_v11000 +#define PFN_cuGraphKernelNodeSetAttribute PFN_cuGraphKernelNodeSetAttribute_v11000 +#define PFN_cuGraphDebugDotPrint PFN_cuGraphDebugDotPrint_v11030 +#define PFN_cuGraphAddMemAllocNode PFN_cuGraphAddMemAllocNode_v11040 +#define PFN_cuGraphMemAllocNodeGetParams PFN_cuGraphMemAllocNodeGetParams_v11040 +#define PFN_cuGraphAddMemFreeNode PFN_cuGraphAddMemFreeNode_v11040 +#define PFN_cuGraphMemFreeNodeGetParams PFN_cuGraphMemFreeNodeGetParams_v11040 +#define PFN_cuGraphNodeSetEnabled PFN_cuGraphNodeSetEnabled_v11060 +#define PFN_cuGraphNodeGetEnabled PFN_cuGraphNodeGetEnabled_v11060 +#define PFN_cuDeviceGraphMemTrim PFN_cuDeviceGraphMemTrim_v11040 +#define PFN_cuDeviceGetGraphMemAttribute PFN_cuDeviceGetGraphMemAttribute_v11040 +#define PFN_cuDeviceSetGraphMemAttribute PFN_cuDeviceSetGraphMemAttribute_v11040 +#define 
PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor_v6050 +#define PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 +#define PFN_cuOccupancyMaxPotentialBlockSize PFN_cuOccupancyMaxPotentialBlockSize_v6050 +#define PFN_cuOccupancyMaxPotentialBlockSizeWithFlags PFN_cuOccupancyMaxPotentialBlockSizeWithFlags_v7000 +#define PFN_cuOccupancyAvailableDynamicSMemPerBlock PFN_cuOccupancyAvailableDynamicSMemPerBlock_v10020 +#define PFN_cuOccupancyMaxPotentialClusterSize PFN_cuOccupancyMaxPotentialClusterSize_v11070 +#define PFN_cuOccupancyMaxActiveClusters PFN_cuOccupancyMaxActiveClusters_v11070 +#define PFN_cuTexRefSetArray PFN_cuTexRefSetArray_v2000 +#define PFN_cuTexRefSetMipmappedArray PFN_cuTexRefSetMipmappedArray_v5000 +#define PFN_cuTexRefSetAddress PFN_cuTexRefSetAddress_v3020 +#define PFN_cuTexRefSetAddress2D PFN_cuTexRefSetAddress2D_v4010 +#define PFN_cuTexRefSetFormat PFN_cuTexRefSetFormat_v2000 +#define PFN_cuTexRefSetAddressMode PFN_cuTexRefSetAddressMode_v2000 +#define PFN_cuTexRefSetFilterMode PFN_cuTexRefSetFilterMode_v2000 +#define PFN_cuTexRefSetMipmapFilterMode PFN_cuTexRefSetMipmapFilterMode_v5000 +#define PFN_cuTexRefSetMipmapLevelBias PFN_cuTexRefSetMipmapLevelBias_v5000 +#define PFN_cuTexRefSetMipmapLevelClamp PFN_cuTexRefSetMipmapLevelClamp_v5000 +#define PFN_cuTexRefSetMaxAnisotropy PFN_cuTexRefSetMaxAnisotropy_v5000 +#define PFN_cuTexRefSetBorderColor PFN_cuTexRefSetBorderColor_v8000 +#define PFN_cuTexRefSetFlags PFN_cuTexRefSetFlags_v2000 +#define PFN_cuTexRefGetAddress PFN_cuTexRefGetAddress_v3020 +#define PFN_cuTexRefGetArray PFN_cuTexRefGetArray_v2000 +#define PFN_cuTexRefGetMipmappedArray PFN_cuTexRefGetMipmappedArray_v5000 +#define PFN_cuTexRefGetAddressMode PFN_cuTexRefGetAddressMode_v2000 +#define PFN_cuTexRefGetFilterMode PFN_cuTexRefGetFilterMode_v2000 +#define PFN_cuTexRefGetFormat PFN_cuTexRefGetFormat_v2000 +#define PFN_cuTexRefGetMipmapFilterMode PFN_cuTexRefGetMipmapFilterMode_v5000 +#define PFN_cuTexRefGetMipmapLevelBias PFN_cuTexRefGetMipmapLevelBias_v5000 +#define PFN_cuTexRefGetMipmapLevelClamp PFN_cuTexRefGetMipmapLevelClamp_v5000 +#define PFN_cuTexRefGetMaxAnisotropy PFN_cuTexRefGetMaxAnisotropy_v5000 +#define PFN_cuTexRefGetBorderColor PFN_cuTexRefGetBorderColor_v8000 +#define PFN_cuTexRefGetFlags PFN_cuTexRefGetFlags_v2000 +#define PFN_cuTexRefCreate PFN_cuTexRefCreate_v2000 +#define PFN_cuTexRefDestroy PFN_cuTexRefDestroy_v2000 +#define PFN_cuSurfRefSetArray PFN_cuSurfRefSetArray_v3000 +#define PFN_cuSurfRefGetArray PFN_cuSurfRefGetArray_v3000 +#define PFN_cuTexObjectCreate PFN_cuTexObjectCreate_v5000 +#define PFN_cuTexObjectDestroy PFN_cuTexObjectDestroy_v5000 +#define PFN_cuTexObjectGetResourceDesc PFN_cuTexObjectGetResourceDesc_v5000 +#define PFN_cuTexObjectGetTextureDesc PFN_cuTexObjectGetTextureDesc_v5000 +#define PFN_cuTexObjectGetResourceViewDesc PFN_cuTexObjectGetResourceViewDesc_v5000 +#define PFN_cuSurfObjectCreate PFN_cuSurfObjectCreate_v5000 +#define PFN_cuSurfObjectDestroy PFN_cuSurfObjectDestroy_v5000 +#define PFN_cuSurfObjectGetResourceDesc PFN_cuSurfObjectGetResourceDesc_v5000 +#define PFN_cuTensorMapEncodeTiled PFN_cuTensorMapEncodeTiled_v12000 +#define PFN_cuTensorMapEncodeIm2col PFN_cuTensorMapEncodeIm2col_v12000 +#define PFN_cuTensorMapReplaceAddress PFN_cuTensorMapReplaceAddress_v12000 +#define PFN_cuDeviceCanAccessPeer PFN_cuDeviceCanAccessPeer_v4000 +#define PFN_cuCtxEnablePeerAccess PFN_cuCtxEnablePeerAccess_v4000 
+#define PFN_cuCtxDisablePeerAccess PFN_cuCtxDisablePeerAccess_v4000 +#define PFN_cuDeviceGetP2PAttribute PFN_cuDeviceGetP2PAttribute_v8000 +#define PFN_cuGraphicsUnregisterResource PFN_cuGraphicsUnregisterResource_v3000 +#define PFN_cuGraphicsSubResourceGetMappedArray PFN_cuGraphicsSubResourceGetMappedArray_v3000 +#define PFN_cuGraphicsResourceGetMappedMipmappedArray PFN_cuGraphicsResourceGetMappedMipmappedArray_v5000 +#define PFN_cuGraphicsResourceGetMappedPointer PFN_cuGraphicsResourceGetMappedPointer_v3020 +#define PFN_cuGraphicsResourceSetMapFlags PFN_cuGraphicsResourceSetMapFlags_v6050 +#define PFN_cuGraphicsMapResources __API_TYPEDEF_PTSZ(PFN_cuGraphicsMapResources, 3000, 7000) +#define PFN_cuGraphicsUnmapResources __API_TYPEDEF_PTSZ(PFN_cuGraphicsUnmapResources, 3000, 7000) +#define PFN_cuGetExportTable PFN_cuGetExportTable_v3000 +#define PFN_cuFuncGetModule PFN_cuFuncGetModule_v11000 +#define PFN_cuFlushGPUDirectRDMAWrites PFN_cuFlushGPUDirectRDMAWrites_v11030 +#define PFN_cuGetProcAddress PFN_cuGetProcAddress_v12000 +#define PFN_cuUserObjectCreate PFN_cuUserObjectCreate_v11030 +#define PFN_cuUserObjectRetain PFN_cuUserObjectRetain_v11030 +#define PFN_cuUserObjectRelease PFN_cuUserObjectRelease_v11030 +#define PFN_cuGraphRetainUserObject PFN_cuGraphRetainUserObject_v11030 +#define PFN_cuGraphReleaseUserObject PFN_cuGraphReleaseUserObject_v11030 +#define PFN_cuModuleGetLoadingMode PFN_cuModuleGetLoadingMode_v11070 +#define PFN_cuMemGetHandleForAddressRange PFN_cuMemGetHandleForAddressRange_v11070 +#define PFN_cuLibraryLoadData PFN_cuLibraryLoadData_v12000 +#define PFN_cuLibraryLoadFromFile PFN_cuLibraryLoadFromFile_v12000 +#define PFN_cuLibraryUnload PFN_cuLibraryUnload_v12000 +#define PFN_cuLibraryGetKernel PFN_cuLibraryGetKernel_v12000 +#define PFN_cuLibraryGetModule PFN_cuLibraryGetModule_v12000 +#define PFN_cuKernelGetFunction PFN_cuKernelGetFunction_v12000 +#define PFN_cuLibraryGetGlobal PFN_cuLibraryGetGlobal_v12000 +#define PFN_cuLibraryGetManaged PFN_cuLibraryGetManaged_v12000 +#define PFN_cuKernelGetAttribute PFN_cuKernelGetAttribute_v12000 +#define PFN_cuKernelSetAttribute PFN_cuKernelSetAttribute_v12000 +#define PFN_cuKernelSetCacheConfig PFN_cuKernelSetCacheConfig_v12000 +#define PFN_cuLibraryGetUnifiedFunction PFN_cuLibraryGetUnifiedFunction_v12000 +#define PFN_cuCoredumpGetAttribute PFN_cuCoredumpGetAttribute_v12010 +#define PFN_cuCoredumpGetAttributeGlobal PFN_cuCoredumpGetAttributeGlobal_v12010 +#define PFN_cuCoredumpSetAttribute PFN_cuCoredumpSetAttribute_v12010 +#define PFN_cuCoredumpSetAttributeGlobal PFN_cuCoredumpSetAttributeGlobal_v12010 + + +/* + * Type definitions for functions defined in cuda.h + */ +typedef CUresult (CUDAAPI *PFN_cuGetErrorString_v6000)(CUresult error, const char **pStr); +typedef CUresult (CUDAAPI *PFN_cuGetErrorName_v6000)(CUresult error, const char **pStr); +typedef CUresult (CUDAAPI *PFN_cuInit_v2000)(unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuDriverGetVersion_v2020)(int *driverVersion); +typedef CUresult (CUDAAPI *PFN_cuDeviceGet_v2000)(CUdevice_v1 *device, int ordinal); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetCount_v2000)(int *count); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetName_v2000)(char *name, int len, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetUuid_v9020)(CUuuid *uuid, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetUuid_v11040)(CUuuid *uuid, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetLuid_v10000)(char *luid, unsigned int *deviceNodeMask, CUdevice_v1 dev); 
+typedef CUresult (CUDAAPI *PFN_cuDeviceTotalMem_v3020)(size_t *bytes, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetTexture1DLinearMaxWidth_v11010)(size_t *maxWidthInElements, CUarray_format format, unsigned numChannels, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetAttribute_v2000)(int *pi, CUdevice_attribute attrib, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetNvSciSyncAttributes_v10020)(void *nvSciSyncAttrList, CUdevice_v1 dev, int flags); +typedef CUresult (CUDAAPI *PFN_cuDeviceSetMemPool_v11020)(CUdevice_v1 dev, CUmemoryPool pool); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetMemPool_v11020)(CUmemoryPool *pool, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetDefaultMemPool_v11020)(CUmemoryPool *pool_out, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetProperties_v2000)(CUdevprop_v1 *prop, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceComputeCapability_v2000)(int *major, int *minor, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRetain_v7000)(CUcontext *pctx, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRelease_v11000)(CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxSetFlags_v11000)(CUdevice_v1 dev, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxGetState_v7000)(CUdevice_v1 dev, unsigned int *flags, int *active); +typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxReset_v11000)(CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetExecAffinitySupport_v11040)(int *pi, CUexecAffinityType type, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v3020)(CUcontext *pctx, unsigned int flags, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v11040)(CUcontext *pctx, CUexecAffinityParam *paramsArray, int numParams, unsigned int flags, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuCtxGetId_v12000)(CUcontext ctx, unsigned long long *ctxId); +typedef CUresult (CUDAAPI *PFN_cuCtxDestroy_v4000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxPushCurrent_v4000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxPopCurrent_v4000)(CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuCtxSetCurrent_v4000)(CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxGetCurrent_v4000)(CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuCtxGetDevice_v2000)(CUdevice_v1 *device); +typedef CUresult (CUDAAPI *PFN_cuCtxGetFlags_v7000)(unsigned int *flags); +typedef CUresult (CUDAAPI *PFN_cuCtxSetFlags_v12010)(unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuCtxSynchronize_v2000)(void); +typedef CUresult (CUDAAPI *PFN_cuCtxSetLimit_v3010)(CUlimit limit, size_t value); +typedef CUresult (CUDAAPI *PFN_cuCtxGetLimit_v3010)(size_t *pvalue, CUlimit limit); +typedef CUresult (CUDAAPI *PFN_cuCtxGetCacheConfig_v3020)(CUfunc_cache *pconfig); +typedef CUresult (CUDAAPI *PFN_cuCtxSetCacheConfig_v3020)(CUfunc_cache config); +typedef CUresult (CUDAAPI *PFN_cuCtxGetSharedMemConfig_v4020)(CUsharedconfig *pConfig); +typedef CUresult (CUDAAPI *PFN_cuCtxSetSharedMemConfig_v4020)(CUsharedconfig config); +typedef CUresult (CUDAAPI *PFN_cuCtxGetApiVersion_v3020)(CUcontext ctx, unsigned int *version); +typedef CUresult (CUDAAPI *PFN_cuCtxGetStreamPriorityRange_v5050)(int *leastPriority, int *greatestPriority); +typedef CUresult (CUDAAPI *PFN_cuCtxResetPersistingL2Cache_v11000)(void); +typedef CUresult (CUDAAPI *PFN_cuCtxAttach_v2000)(CUcontext *pctx, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuCtxDetach_v2000)(CUcontext 
ctx); +typedef CUresult (CUDAAPI *PFN_cuCtxGetExecAffinity_v11040)(CUexecAffinityParam *pExecAffinity, CUexecAffinityType type); +typedef CUresult (CUDAAPI *PFN_cuModuleLoad_v2000)(CUmodule *module, const char *fname); +typedef CUresult (CUDAAPI *PFN_cuModuleLoadData_v2000)(CUmodule *module, const void *image); +typedef CUresult (CUDAAPI *PFN_cuModuleLoadDataEx_v2010)(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); +typedef CUresult (CUDAAPI *PFN_cuModuleLoadFatBinary_v2000)(CUmodule *module, const void *fatCubin); +typedef CUresult (CUDAAPI *PFN_cuModuleUnload_v2000)(CUmodule hmod); +typedef CUresult (CUDAAPI *PFN_cuModuleGetFunction_v2000)(CUfunction *hfunc, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuModuleGetGlobal_v3020)(CUdeviceptr_v2 *dptr, size_t *bytes, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuModuleGetTexRef_v2000)(CUtexref *pTexRef, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuModuleGetSurfRef_v3000)(CUsurfref *pSurfRef, CUmodule hmod, const char *name); +typedef CUresult (CUDAAPI *PFN_cuLinkCreate_v6050)(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); +typedef CUresult (CUDAAPI *PFN_cuLinkAddData_v6050)(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, unsigned int numOptions, CUjit_option *options, void **optionValues); +typedef CUresult (CUDAAPI *PFN_cuLinkAddFile_v6050)(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues); +typedef CUresult (CUDAAPI *PFN_cuLinkComplete_v5050)(CUlinkState state, void **cubinOut, size_t *sizeOut); +typedef CUresult (CUDAAPI *PFN_cuLinkDestroy_v5050)(CUlinkState state); +typedef CUresult (CUDAAPI *PFN_cuMemGetInfo_v3020)(size_t *free, size_t *total); +typedef CUresult (CUDAAPI *PFN_cuMemAlloc_v3020)(CUdeviceptr_v2 *dptr, size_t bytesize); +typedef CUresult (CUDAAPI *PFN_cuMemAllocPitch_v3020)(CUdeviceptr_v2 *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes); +typedef CUresult (CUDAAPI *PFN_cuMemFree_v3020)(CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuMemGetAddressRange_v3020)(CUdeviceptr_v2 *pbase, size_t *psize, CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuMemAllocHost_v3020)(void **pp, size_t bytesize); +typedef CUresult (CUDAAPI *PFN_cuMemFreeHost_v2000)(void *p); +typedef CUresult (CUDAAPI *PFN_cuMemHostAlloc_v2020)(void **pp, size_t bytesize, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuMemHostGetDevicePointer_v3020)(CUdeviceptr_v2 *pdptr, void *p, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuMemHostGetFlags_v2030)(unsigned int *pFlags, void *p); +typedef CUresult (CUDAAPI *PFN_cuMemAllocManaged_v6000)(CUdeviceptr_v2 *dptr, size_t bytesize, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetByPCIBusId_v4010)(CUdevice_v1 *dev, const char *pciBusId); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetPCIBusId_v4010)(char *pciBusId, int len, CUdevice_v1 dev); +typedef CUresult (CUDAAPI *PFN_cuIpcGetEventHandle_v4010)(CUipcEventHandle_v1 *pHandle, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuIpcOpenEventHandle_v4010)(CUevent *phEvent, CUipcEventHandle_v1 handle); +typedef CUresult (CUDAAPI *PFN_cuIpcGetMemHandle_v4010)(CUipcMemHandle_v1 *pHandle, CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuIpcOpenMemHandle_v11000)(CUdeviceptr_v2 *pdptr, CUipcMemHandle_v1 handle, 
unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuIpcCloseMemHandle_v4010)(CUdeviceptr_v2 dptr); +typedef CUresult (CUDAAPI *PFN_cuMemHostRegister_v6050)(void *p, size_t bytesize, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuMemHostUnregister_v4000)(void *p); +typedef CUresult (CUDAAPI *PFN_cuMemcpy_v7000_ptds)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeer_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v7000_ptds)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v7000_ptds)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v7000_ptds)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v7000_ptds)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v7000_ptds)(const CUDA_MEMCPY3D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeer_v7000_ptds)(const CUDA_MEMCPY3D_PEER_v1 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAsync_v7000_ptsz)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeerAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v7000_ptsz)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v7000_ptsz)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v7000_ptsz)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v7000_ptsz)(const CUDA_MEMCPY2D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v7000_ptsz)(const CUDA_MEMCPY3D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeerAsync_v7000_ptsz)(const CUDA_MEMCPY3D_PEER_v1 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N); +typedef CUresult (CUDAAPI 
*PFN_cuMemsetD16_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuArrayCreate_v3020)(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v2 *pAllocateArray); +typedef CUresult (CUDAAPI *PFN_cuArrayGetDescriptor_v3020)(CUDA_ARRAY_DESCRIPTOR_v2 *pArrayDescriptor, CUarray hArray); +typedef CUresult (CUDAAPI *PFN_cuArrayGetSparseProperties_v11010)(CUDA_ARRAY_SPARSE_PROPERTIES_v1 *sparseProperties, CUarray array); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetSparseProperties_v11010)(CUDA_ARRAY_SPARSE_PROPERTIES_v1 *sparseProperties, CUmipmappedArray mipmap); +typedef CUresult (CUDAAPI *PFN_cuArrayGetMemoryRequirements_v11060)(CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 *memoryRequirements, CUarray array, CUdevice device); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetMemoryRequirements_v11060)(CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 *memoryRequirements, CUmipmappedArray mipmap, CUdevice device); +typedef CUresult (CUDAAPI *PFN_cuArrayGetPlane_v11020)(CUarray *pPlaneArray, CUarray hArray, unsigned int planeIdx); +typedef CUresult (CUDAAPI *PFN_cuArrayDestroy_v2000)(CUarray hArray); +typedef CUresult (CUDAAPI *PFN_cuArray3DCreate_v3020)(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v2 *pAllocateArray); +typedef CUresult (CUDAAPI *PFN_cuArray3DGetDescriptor_v3020)(CUDA_ARRAY3D_DESCRIPTOR_v2 *pArrayDescriptor, CUarray hArray); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayCreate_v5000)(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v2 *pMipmappedArrayDesc, unsigned int numMipmapLevels); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetLevel_v5000)(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); +typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayDestroy_v5000)(CUmipmappedArray hMipmappedArray); +typedef CUresult (CUDAAPI *PFN_cuMemAddressReserve_v10020)(CUdeviceptr_v2 *ptr, size_t size, size_t alignment, CUdeviceptr_v2 addr, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemAddressFree_v10020)(CUdeviceptr_v2 ptr, size_t size); +typedef CUresult (CUDAAPI 
*PFN_cuMemCreate_v10020)(CUmemGenericAllocationHandle_v1 *handle, size_t size, const CUmemAllocationProp_v1 *prop, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemRelease_v10020)(CUmemGenericAllocationHandle_v1 handle); +typedef CUresult (CUDAAPI *PFN_cuMemMap_v10020)(CUdeviceptr_v2 ptr, size_t size, size_t offset, CUmemGenericAllocationHandle_v1 handle, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemMapArrayAsync_v11010_ptsz)(CUarrayMapInfo_v1 *mapInfoList, unsigned int count, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemUnmap_v10020)(CUdeviceptr_v2 ptr, size_t size); +typedef CUresult (CUDAAPI *PFN_cuMemSetAccess_v10020)(CUdeviceptr_v2 ptr, size_t size, const CUmemAccessDesc_v1 *desc, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMemGetAccess_v10020)(unsigned long long *flags, const CUmemLocation_v1 *location, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuMemExportToShareableHandle_v10020)(void *shareableHandle, CUmemGenericAllocationHandle_v1 handle, CUmemAllocationHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemImportFromShareableHandle_v10020)(CUmemGenericAllocationHandle_v1 *handle, void *osHandle, CUmemAllocationHandleType shHandleType); +typedef CUresult (CUDAAPI *PFN_cuMemGetAllocationGranularity_v10020)(size_t *granularity, const CUmemAllocationProp_v1 *prop, CUmemAllocationGranularity_flags option); +typedef CUresult (CUDAAPI *PFN_cuMemGetAllocationPropertiesFromHandle_v10020)(CUmemAllocationProp_v1 *prop, CUmemGenericAllocationHandle_v1 handle); +typedef CUresult (CUDAAPI *PFN_cuMemRetainAllocationHandle_v11000)(CUmemGenericAllocationHandle_v1 *handle, void *addr); +typedef CUresult (CUDAAPI *PFN_cuMemFreeAsync_v11020_ptsz)(CUdeviceptr_v2 dptr, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAllocAsync_v11020_ptsz)(CUdeviceptr_v2 *dptr, size_t bytesize, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemPoolTrimTo_v11020)(CUmemoryPool pool, size_t minBytesToKeep); +typedef CUresult (CUDAAPI *PFN_cuMemPoolSetAttribute_v11020)(CUmemoryPool pool, CUmemPool_attribute attr, void *value); +typedef CUresult (CUDAAPI *PFN_cuMemPoolGetAttribute_v11020)(CUmemoryPool pool, CUmemPool_attribute attr, void *value); +typedef CUresult (CUDAAPI *PFN_cuMemPoolSetAccess_v11020)(CUmemoryPool pool, const CUmemAccessDesc_v1 *map, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMemPoolGetAccess_v11020)(CUmemAccess_flags *flags, CUmemoryPool memPool, CUmemLocation_v1 *location); +typedef CUresult (CUDAAPI *PFN_cuMemPoolCreate_v11020)(CUmemoryPool *pool, const CUmemPoolProps_v1 *poolProps); +typedef CUresult (CUDAAPI *PFN_cuMemPoolDestroy_v11020)(CUmemoryPool pool); +typedef CUresult (CUDAAPI *PFN_cuMemAllocFromPoolAsync_v11020_ptsz)(CUdeviceptr_v2 *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemPoolExportToShareableHandle_v11020)(void *handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemPoolImportFromShareableHandle_v11020)(CUmemoryPool *pool_out, void *handle, CUmemAllocationHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMemPoolExportPointer_v11020)(CUmemPoolPtrExportData_v1 *shareData_out, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuMemPoolImportPointer_v11020)(CUdeviceptr_v2 *ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData_v1 *shareData); +typedef CUresult (CUDAAPI *PFN_cuPointerGetAttribute_v4000)(void *data, 
CUpointer_attribute attribute, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuMemPrefetchAsync_v8000_ptsz)(CUdeviceptr_v2 devPtr, size_t count, CUdevice_v1 dstDevice, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAdvise_v8000)(CUdeviceptr_v2 devPtr, size_t count, CUmem_advise advice, CUdevice_v1 device); +typedef CUresult (CUDAAPI *PFN_cuMemRangeGetAttribute_v8000)(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr_v2 devPtr, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMemRangeGetAttributes_v8000)(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr_v2 devPtr, size_t count); +typedef CUresult (CUDAAPI *PFN_cuMulticastCreate_v12010)(CUmemGenericAllocationHandle *mcHandle, const CUmulticastObjectProp *prop); +typedef CUresult (CUDAAPI *PFN_cuMulticastAddDevice_v12010)(CUmemGenericAllocationHandle mcHandle, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuMulticastBindMem_v12010)(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUmemGenericAllocationHandle memHandle, size_t memOffset, size_t size, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMulticastBindAddr_v12010)(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUdeviceptr memptr, size_t size, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuMulticastUnbind_v12010)(CUmemGenericAllocationHandle mcHandle, CUdevice dev, size_t mcOffset, size_t size); +typedef CUresult (CUDAAPI *PFN_cuMulticastGetGranularity_v12010)(size_t *granularity, const CUmulticastObjectProp *prop, CUmulticastGranularity_flags option); +typedef CUresult (CUDAAPI *PFN_cuPointerSetAttribute_v6000)(const void *value, CUpointer_attribute attribute, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuPointerGetAttributes_v7000)(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr_v2 ptr); +typedef CUresult (CUDAAPI *PFN_cuStreamCreate_v2000)(CUstream *phStream, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuStreamCreateWithPriority_v5050)(CUstream *phStream, unsigned int flags, int priority); +typedef CUresult (CUDAAPI *PFN_cuStreamGetId_v12000)(CUstream hStream, unsigned long long *streamId); +typedef CUresult (CUDAAPI *PFN_cuStreamGetId_v12000_ptsz)(CUstream hStream, unsigned long long *streamId); +typedef CUresult (CUDAAPI *PFN_cuStreamGetPriority_v7000_ptsz)(CUstream hStream, int *priority); +typedef CUresult (CUDAAPI *PFN_cuStreamGetFlags_v7000_ptsz)(CUstream hStream, unsigned int *flags); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCtx_v9020_ptsz)(CUstream hStream, CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitEvent_v7000_ptsz)(CUstream hStream, CUevent hEvent, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAddCallback_v7000_ptsz)(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10010_ptsz)(CUstream hStream, CUstreamCaptureMode mode); +typedef CUresult (CUDAAPI *PFN_cuThreadExchangeStreamCaptureMode_v10010)(CUstreamCaptureMode *mode); +typedef CUresult (CUDAAPI *PFN_cuStreamEndCapture_v10000_ptsz)(CUstream hStream, CUgraph *phGraph); +typedef CUresult (CUDAAPI *PFN_cuStreamIsCapturing_v10000_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v10010_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v11030_ptsz)(CUstream 
hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); +typedef CUresult (CUDAAPI *PFN_cuStreamUpdateCaptureDependencies_v11030_ptsz)(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAttachMemAsync_v7000_ptsz)(CUstream hStream, CUdeviceptr_v2 dptr, size_t length, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamQuery_v7000_ptsz)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamSynchronize_v7000_ptsz)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamDestroy_v4000)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamCopyAttributes_v11000_ptsz)(CUstream dst, CUstream src); +typedef CUresult (CUDAAPI *PFN_cuStreamGetAttribute_v11000_ptsz)(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue_v1 *value_out); +typedef CUresult (CUDAAPI *PFN_cuStreamSetAttribute_v11000_ptsz)(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue_v1 *value); +typedef CUresult (CUDAAPI *PFN_cuEventCreate_v2000)(CUevent *phEvent, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuEventRecord_v7000_ptsz)(CUevent hEvent, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuEventRecordWithFlags_v11010_ptsz)(CUevent hEvent, CUstream hStream, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuEventQuery_v2000)(CUevent hEvent); +typedef CUresult (CUDAAPI *PFN_cuEventSynchronize_v2000)(CUevent hEvent); +typedef CUresult (CUDAAPI *PFN_cuEventDestroy_v4000)(CUevent hEvent); +typedef CUresult (CUDAAPI *PFN_cuEventElapsedTime_v2000)(float *pMilliseconds, CUevent hStart, CUevent hEnd); +typedef CUresult (CUDAAPI *PFN_cuImportExternalMemory_v10000)(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 *memHandleDesc); +typedef CUresult (CUDAAPI *PFN_cuExternalMemoryGetMappedBuffer_v10000)(CUdeviceptr_v2 *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 *bufferDesc); +typedef CUresult (CUDAAPI *PFN_cuExternalMemoryGetMappedMipmappedArray_v10000)(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 *mipmapDesc); +typedef CUresult (CUDAAPI *PFN_cuDestroyExternalMemory_v10000)(CUexternalMemory extMem); +typedef CUresult (CUDAAPI *PFN_cuImportExternalSemaphore_v10000)(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 *semHandleDesc); +typedef CUresult (CUDAAPI *PFN_cuSignalExternalSemaphoresAsync_v10000_ptsz)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuWaitExternalSemaphoresAsync_v10000_ptsz)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuDestroyExternalSemaphore_v10000)(CUexternalSemaphore extSem); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v8000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v9000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v8000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v9000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, 
cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v8000_ptsz)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams_v1 *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v11070_ptsz)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuFuncGetAttribute_v2020)(int *pi, CUfunction_attribute attrib, CUfunction hfunc); +typedef CUresult (CUDAAPI *PFN_cuFuncSetAttribute_v9000)(CUfunction hfunc, CUfunction_attribute attrib, int value); +typedef CUresult (CUDAAPI *PFN_cuFuncSetCacheConfig_v3000)(CUfunction hfunc, CUfunc_cache config); +typedef CUresult (CUDAAPI *PFN_cuFuncSetSharedMemConfig_v4020)(CUfunction hfunc, CUsharedconfig config); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernel_v7000_ptsz)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernelEx_v11060_ptsz)(const CUlaunchConfig *config, CUfunction f, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernel_v9000_ptsz)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); +typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernelMultiDevice_v9000)(CUDA_LAUNCH_PARAMS_v1 *launchParamsList, unsigned int numDevices, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuLaunchHostFunc_v10000_ptsz)(CUstream hStream, CUhostFn fn, void *userData); +typedef CUresult (CUDAAPI *PFN_cuFuncSetBlockShape_v2000)(CUfunction hfunc, int x, int y, int z); +typedef CUresult (CUDAAPI *PFN_cuFuncSetSharedSize_v2000)(CUfunction hfunc, unsigned int bytes); +typedef CUresult (CUDAAPI *PFN_cuParamSetSize_v2000)(CUfunction hfunc, unsigned int numbytes); +typedef CUresult (CUDAAPI *PFN_cuParamSeti_v2000)(CUfunction hfunc, int offset, unsigned int value); +typedef CUresult (CUDAAPI *PFN_cuParamSetf_v2000)(CUfunction hfunc, int offset, float value); +typedef CUresult (CUDAAPI *PFN_cuParamSetv_v2000)(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes); +typedef CUresult (CUDAAPI *PFN_cuLaunch_v2000)(CUfunction f); +typedef CUresult (CUDAAPI *PFN_cuLaunchGrid_v2000)(CUfunction f, int grid_width, int grid_height); +typedef CUresult (CUDAAPI *PFN_cuLaunchGridAsync_v2000)(CUfunction f, int grid_width, int grid_height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuParamSetTexRef_v2000)(CUfunction hfunc, int texunit, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuGraphCreate_v10000)(CUgraph *phGraph, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphAddKernelNode_v10000)(CUgraphNode *phGraphNode, 
CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetParams_v10000)(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddKernelNode_v12000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetParams_v12000)(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetParams_v12000)(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemcpyNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D_v2 *copyParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphMemcpyNodeGetParams_v10000)(CUgraphNode hNode, CUDA_MEMCPY3D_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphMemcpyNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_MEMCPY3D_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemsetNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS_v1 *memsetParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphMemsetNodeGetParams_v10000)(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphMemsetNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddHostNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphHostNodeGetParams_v10000)(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphHostNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddChildGraphNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphChildGraphNodeGetGraph_v10000)(CUgraphNode hNode, CUgraph *phGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphAddEmptyNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphAddEventRecordNode_v11010)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphEventRecordNodeGetEvent_v11010)(CUgraphNode hNode, CUevent *event_out); +typedef CUresult (CUDAAPI *PFN_cuGraphEventRecordNodeSetEvent_v11010)(CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphAddEventWaitNode_v11010)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphEventWaitNodeGetEvent_v11010)(CUgraphNode hNode, CUevent *event_out); +typedef CUresult (CUDAAPI *PFN_cuGraphEventWaitNodeSetEvent_v11010)(CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI 
*PFN_cuGraphAddExternalSemaphoresSignalNode_v11020)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresSignalNodeGetParams_v11020)(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *params_out); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresSignalNodeSetParams_v11020)(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddExternalSemaphoresWaitNode_v11020)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresWaitNodeGetParams_v11020)(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *params_out); +typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresWaitNodeSetParams_v11020)(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphAddBatchMemOpNode_v11070)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphBatchMemOpNodeGetParams_v11070)(CUgraphNode hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out); +typedef CUresult (CUDAAPI *PFN_cuGraphBatchMemOpNodeSetParams_v11070)(CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecBatchMemOpNodeSetParams_v11070)(CUgraphExec graphExec, CUgraphNode node, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphClone_v10000)(CUgraph *phGraphClone, CUgraph originalGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeFindInClone_v10000)(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetType_v10000)(CUgraphNode hNode, CUgraphNodeType *type); +typedef CUresult (CUDAAPI *PFN_cuGraphGetNodes_v10000)(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes); +typedef CUresult (CUDAAPI *PFN_cuGraphGetRootNodes_v10000)(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes); +typedef CUresult (CUDAAPI *PFN_cuGraphGetEdges_v10000)(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetDependencies_v10000)(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetDependentNodes_v10000)(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes); +typedef CUresult (CUDAAPI *PFN_cuGraphAddDependencies_v10000)(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphRemoveDependencies_v10000)(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); +typedef CUresult (CUDAAPI *PFN_cuGraphDestroyNode_v10000)(CUgraphNode hNode); +typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithFlags_v11040)(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithParams_v12000_ptsz)(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecGetFlags_v12000)(CUgraphExec hGraphExec, cuuint64_t *flags); +typedef CUresult (CUDAAPI 
*PFN_cuGraphExecKernelNodeSetParams_v10010)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecKernelNodeSetParams_v12000)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v2 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecMemcpyNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D_v2 *copyParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphExecMemsetNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS_v1 *memsetParams, CUcontext ctx); +typedef CUresult (CUDAAPI *PFN_cuGraphExecHostNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecChildGraphNodeSetParams_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphExecEventRecordNodeSetEvent_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphExecEventWaitNodeSetEvent_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); +typedef CUresult (CUDAAPI *PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams_v11020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams_v11020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphUpload_v11010_ptsz)(CUgraphExec hGraphExec, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphLaunch_v10000_ptsz)(CUgraphExec hGraphExec, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphExecDestroy_v10000)(CUgraphExec hGraphExec); +typedef CUresult (CUDAAPI *PFN_cuGraphDestroy_v10000)(CUgraph hGraph); +typedef CUresult (CUDAAPI *PFN_cuGraphExecUpdate_v10020)(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out); +typedef CUresult (CUDAAPI *PFN_cuGraphExecUpdate_v12000)(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphExecUpdateResultInfo *resultInfo); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeCopyAttributes_v11000)(CUgraphNode dst, CUgraphNode src); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetAttribute_v11000)(CUgraphNode hNode, CUkernelNodeAttrID attr, CUkernelNodeAttrValue_v1 *value_out); +typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetAttribute_v11000)(CUgraphNode hNode, CUkernelNodeAttrID attr, const CUkernelNodeAttrValue_v1 *value); +typedef CUresult (CUDAAPI *PFN_cuGraphDebugDotPrint_v11030)(CUgraph hGraph, const char *path, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemAllocNode_v11040)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams); +typedef CUresult (CUDAAPI *PFN_cuGraphMemAllocNodeGetParams_v11040)(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS *params_out); +typedef CUresult (CUDAAPI *PFN_cuGraphAddMemFreeNode_v11040)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUdeviceptr dptr); +typedef CUresult (CUDAAPI *PFN_cuGraphMemFreeNodeGetParams_v11040)(CUgraphNode hNode, CUdeviceptr *dptr_out); +typedef CUresult (CUDAAPI *PFN_cuGraphNodeSetEnabled_v11060)(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int isEnabled); +typedef CUresult (CUDAAPI 
*PFN_cuGraphNodeGetEnabled_v11060)(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int *isEnabled); +typedef CUresult (CUDAAPI *PFN_cuDeviceGraphMemTrim_v11040)(CUdevice device); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetGraphMemAttribute_v11040)(CUdevice device, CUgraphMem_attribute attr, void* value); +typedef CUresult (CUDAAPI *PFN_cuDeviceSetGraphMemAttribute_v11040)(CUdevice device, CUgraphMem_attribute attr, void* value); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor_v6050)(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000)(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialBlockSize_v6050)(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialBlockSizeWithFlags_v7000)(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuOccupancyAvailableDynamicSMemPerBlock_v10020)(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialClusterSize_v11070)(int *clusterSize, CUfunction func, const CUlaunchConfig *config); +typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveClusters_v11070)(int *numClusters, CUfunction func, const CUlaunchConfig *config); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetArray_v2000)(CUtexref hTexRef, CUarray hArray, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmappedArray_v5000)(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress_v3020)(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr_v2 dptr, size_t bytes); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v4010)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v2 *desc, CUdeviceptr_v2 dptr, size_t Pitch); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetFormat_v2000)(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddressMode_v2000)(CUtexref hTexRef, int dim, CUaddress_mode am); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetFilterMode_v2000)(CUtexref hTexRef, CUfilter_mode fm); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapFilterMode_v5000)(CUtexref hTexRef, CUfilter_mode fm); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapLevelBias_v5000)(CUtexref hTexRef, float bias); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapLevelClamp_v5000)(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetMaxAnisotropy_v5000)(CUtexref hTexRef, unsigned int maxAniso); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetBorderColor_v8000)(CUtexref hTexRef, float *pBorderColor); +typedef CUresult (CUDAAPI *PFN_cuTexRefSetFlags_v2000)(CUtexref hTexRef, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddress_v3020)(CUdeviceptr_v2 *pdptr, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetArray_v2000)(CUarray *phArray, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmappedArray_v5000)(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef); +typedef CUresult (CUDAAPI 
*PFN_cuTexRefGetAddressMode_v2000)(CUaddress_mode *pam, CUtexref hTexRef, int dim); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetFilterMode_v2000)(CUfilter_mode *pfm, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetFormat_v2000)(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapFilterMode_v5000)(CUfilter_mode *pfm, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapLevelBias_v5000)(float *pbias, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapLevelClamp_v5000)(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetMaxAnisotropy_v5000)(int *pmaxAniso, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetBorderColor_v8000)(float *pBorderColor, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefGetFlags_v2000)(unsigned int *pFlags, CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefCreate_v2000)(CUtexref *pTexRef); +typedef CUresult (CUDAAPI *PFN_cuTexRefDestroy_v2000)(CUtexref hTexRef); +typedef CUresult (CUDAAPI *PFN_cuSurfRefSetArray_v3000)(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuSurfRefGetArray_v3000)(CUarray *phArray, CUsurfref hSurfRef); +typedef CUresult (CUDAAPI *PFN_cuTexObjectCreate_v5000)(CUtexObject_v1 *pTexObject, const CUDA_RESOURCE_DESC_v1 *pResDesc, const CUDA_TEXTURE_DESC_v1 *pTexDesc, const CUDA_RESOURCE_VIEW_DESC_v1 *pResViewDesc); +typedef CUresult (CUDAAPI *PFN_cuTexObjectDestroy_v5000)(CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuTexObjectGetResourceDesc_v5000)(CUDA_RESOURCE_DESC_v1 *pResDesc, CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuTexObjectGetTextureDesc_v5000)(CUDA_TEXTURE_DESC_v1 *pTexDesc, CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuTexObjectGetResourceViewDesc_v5000)(CUDA_RESOURCE_VIEW_DESC_v1 *pResViewDesc, CUtexObject_v1 texObject); +typedef CUresult (CUDAAPI *PFN_cuSurfObjectCreate_v5000)(CUsurfObject_v1 *pSurfObject, const CUDA_RESOURCE_DESC_v1 *pResDesc); +typedef CUresult (CUDAAPI *PFN_cuSurfObjectDestroy_v5000)(CUsurfObject_v1 surfObject); +typedef CUresult (CUDAAPI *PFN_cuSurfObjectGetResourceDesc_v5000)(CUDA_RESOURCE_DESC_v1 *pResDesc, CUsurfObject_v1 surfObject); +typedef CUresult (CUDAAPI *PFN_cuTensorMapEncodeTiled_v12000)(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const cuuint32_t *boxDim, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); +typedef CUresult (CUDAAPI *PFN_cuTensorMapEncodeIm2col_v12000)(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const int *pixelBoxLowerCorner, const int *pixelBoxUpperCorner, cuuint32_t channelsPerPixel, cuuint32_t pixelsPerColumn, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); +typedef CUresult (CUDAAPI *PFN_cuTensorMapReplaceAddress_v12000)(CUtensorMap *tensorMap, void *globalAddress); +typedef CUresult (CUDAAPI *PFN_cuDeviceCanAccessPeer_v4000)(int *canAccessPeer, CUdevice_v1 dev, CUdevice_v1 peerDev); +typedef CUresult (CUDAAPI 
*PFN_cuCtxEnablePeerAccess_v4000)(CUcontext peerContext, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuCtxDisablePeerAccess_v4000)(CUcontext peerContext); +typedef CUresult (CUDAAPI *PFN_cuDeviceGetP2PAttribute_v8000)(int *value, CUdevice_P2PAttribute attrib, CUdevice_v1 srcDevice, CUdevice_v1 dstDevice); +typedef CUresult (CUDAAPI *PFN_cuGraphicsUnregisterResource_v3000)(CUgraphicsResource resource); +typedef CUresult (CUDAAPI *PFN_cuGraphicsSubResourceGetMappedArray_v3000)(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedMipmappedArray_v5000)(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedPointer_v3020)(CUdeviceptr_v2 *pDevPtr, size_t *pSize, CUgraphicsResource resource); +typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceSetMapFlags_v6050)(CUgraphicsResource resource, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphicsMapResources_v7000_ptsz)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphicsUnmapResources_v7000_ptsz)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGetExportTable_v3000)(const void **ppExportTable, const CUuuid *pExportTableId); +typedef CUresult (CUDAAPI *PFN_cuFuncGetModule_v11000)(CUmodule *hmod, CUfunction hfunc); +typedef CUresult (CUDAAPI *PFN_cuGetProcAddress_v11030)(const char *symbol, void **pfn, int driverVersion, cuuint64_t flags); +typedef CUresult (CUDAAPI *PFN_cuGetProcAddress_v12000)(const char *symbol, void **pfn, int driverVersion, cuuint64_t flags, CUdriverProcAddressQueryResult *symbolFound); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v3020)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v3020)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v3020)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v3020)(CUarray dstArray, size_t dstOffset, CUdeviceptr_v2 srcDevice, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v3020)(CUdeviceptr_v2 dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v3020)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v3020)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v3020)(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v3020)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v3020)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v3020)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v3020)(const CUDA_MEMCPY2D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v3020)(const CUDA_MEMCPY3D_v2 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v3020)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v3020)(void *dstHost, 
CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v3020)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v3020)(const CUDA_MEMCPY2D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v3020)(const CUDA_MEMCPY3D_v2 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v3020)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v3020)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v3020)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); +typedef CUresult (CUDAAPI *PFN_cuMemcpy_v4000)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyAsync_v4000)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeer_v4000)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount); +typedef CUresult (CUDAAPI *PFN_cuMemcpyPeerAsync_v4000)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeer_v4000)(const CUDA_MEMCPY3D_PEER_v1 *pCopy); +typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeerAsync_v4000)(const CUDA_MEMCPY3D_PEER_v1 *pCopy, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD8Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD16Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD32Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamGetPriority_v5050)(CUstream hStream, int *priority); +typedef CUresult (CUDAAPI *PFN_cuStreamGetFlags_v5050)(CUstream hStream, unsigned int *flags); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCtx_v9020)(CUstream hStream, CUcontext *pctx); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitEvent_v3020)(CUstream hStream, CUevent hEvent, unsigned int Flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAddCallback_v5000)(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamAttachMemAsync_v6000)(CUstream hStream, CUdeviceptr_v2 dptr, size_t length, unsigned int flags); +typedef CUresult (CUDAAPI 
*PFN_cuStreamQuery_v2000)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamSynchronize_v2000)(CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuEventRecord_v2000)(CUevent hEvent, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuEventRecordWithFlags_v11010)(CUevent hEvent, CUstream hStream, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernel_v4000)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchKernelEx_v11060)(const CUlaunchConfig *config, CUfunction f, void **kernelParams, void **extra); +typedef CUresult (CUDAAPI *PFN_cuLaunchHostFunc_v10000)(CUstream hStream, CUhostFn fn, void *userData); +typedef CUresult (CUDAAPI *PFN_cuGraphicsMapResources_v3000)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphicsUnmapResources_v3000)(unsigned int count, CUgraphicsResource *resources, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v8000)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v8000)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v9000)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v9000)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v8000)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v11070)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuMemPrefetchAsync_v8000)(CUdeviceptr_v2 devPtr, size_t count, CUdevice_v1 dstDevice, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernel_v9000)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); +typedef CUresult (CUDAAPI *PFN_cuSignalExternalSemaphoresAsync_v10000)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuWaitExternalSemaphoresAsync_v10000)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream); +typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10010)(CUstream hStream, CUstreamCaptureMode mode); +typedef CUresult (CUDAAPI 
*PFN_cuStreamEndCapture_v10000)(CUstream hStream, CUgraph *phGraph); +typedef CUresult (CUDAAPI *PFN_cuStreamIsCapturing_v10000)(CUstream hStream, CUstreamCaptureStatus *captureStatus); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v10010)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); +typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v11030)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); +typedef CUresult (CUDAAPI *PFN_cuStreamUpdateCaptureDependencies_v11030)(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithParams_v12000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); +typedef CUresult (CUDAAPI *PFN_cuGraphUpload_v11010)(CUgraphExec hGraph, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuGraphLaunch_v10000)(CUgraphExec hGraph, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuStreamCopyAttributes_v11000)(CUstream dstStream, CUstream srcStream); +typedef CUresult (CUDAAPI *PFN_cuStreamGetAttribute_v11000)(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue_v1 *value); +typedef CUresult (CUDAAPI *PFN_cuStreamSetAttribute_v11000)(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue_v1 *param); +typedef CUresult (CUDAAPI *PFN_cuMemMapArrayAsync_v11010)(CUarrayMapInfo_v1 *mapInfoList, unsigned int count, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemFreeAsync_v11020)(CUdeviceptr_v2 dptr, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAllocAsync_v11020)(CUdeviceptr_v2 *dptr, size_t bytesize, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuMemAllocFromPoolAsync_v11020)(CUdeviceptr_v2 *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); +typedef CUresult (CUDAAPI *PFN_cuFlushGPUDirectRDMAWrites_v11030)(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope); +typedef CUresult (CUDAAPI *PFN_cuUserObjectCreate_v11030)(CUuserObject *object_out, void *ptr, CUhostFn destroy, unsigned int initialRefcount, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuUserObjectRetain_v11030)(CUuserObject object, unsigned int count); +typedef CUresult (CUDAAPI *PFN_cuUserObjectRelease_v11030)(CUuserObject object, unsigned int count); +typedef CUresult (CUDAAPI *PFN_cuGraphRetainUserObject_v11030)(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphReleaseUserObject_v11030)(CUgraph graph, CUuserObject object, unsigned int count); +typedef CUresult (CUDAAPI *PFN_cuModuleGetLoadingMode_v11070)(CUmoduleLoadingMode *mode); +typedef CUresult (CUDAAPI *PFN_cuMemGetHandleForAddressRange_v11070)(void *handle, CUdeviceptr dptr, size_t size, CUmemRangeHandleType handleType, unsigned long long flags); +typedef CUresult (CUDAAPI *PFN_cuLibraryLoadData_v12000)(CUlibrary *library, const void *code, CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, CUlibraryOption *libraryOptions, void** libraryOptionValues, unsigned int numLibraryOptions); +typedef CUresult (CUDAAPI *PFN_cuLibraryLoadFromFile_v12000)(CUlibrary *library, const char *fileName, CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, CUlibraryOption *libraryOptions, void **libraryOptionValues, unsigned int numLibraryOptions); +typedef CUresult (CUDAAPI 
*PFN_cuLibraryUnload_v12000)(CUlibrary library); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetKernel_v12000)(CUkernel *pKernel, CUlibrary library, const char *name); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetModule_v12000)(CUmodule *pMod, CUlibrary library); +typedef CUresult (CUDAAPI *PFN_cuKernelGetFunction_v12000)(CUfunction *pFunc, CUkernel kernel); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetGlobal_v12000)(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetManaged_v12000)(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); +typedef CUresult (CUDAAPI *PFN_cuKernelGetAttribute_v12000)(int *pi, CUfunction_attribute attrib, CUkernel kernel, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuKernelSetAttribute_v12000)(CUfunction_attribute attrib, int val, CUkernel kernel, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuKernelSetCacheConfig_v12000)(CUkernel kernel, CUfunc_cache config, CUdevice dev); +typedef CUresult (CUDAAPI *PFN_cuLibraryGetUnifiedFunction_v12000)(void **fptr, CUlibrary library, const char *symbol); +typedef CUresult(CUDAAPI *PFN_cuCoredumpGetAttribute_v12010)(CUcoredumpSettings get, void *value, size_t *size); +typedef CUresult(CUDAAPI *PFN_cuCoredumpGetAttributeGlobal_v12010)(CUcoredumpSettings get, void *value, size_t *size); +typedef CUresult(CUDAAPI *PFN_cuCoredumpSetAttribute_v12010)(CUcoredumpSettings set, void *value, size_t *size); +typedef CUresult(CUDAAPI *PFN_cuCoredumpSetAttributeGlobal_v12010)(CUcoredumpSettings set, void *value, size_t *size); +/* + * Type definitions for older versioned functions in cuda.h + */ +#if defined(__CUDA_API_VERSION_INTERNAL) + typedef CUresult (CUDAAPI *PFN_cuMemHostRegister_v4000)(void *p, size_t bytesize, unsigned int Flags); + typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceSetMapFlags_v3000)(CUgraphicsResource resource, unsigned int flags); + typedef CUresult (CUDAAPI *PFN_cuLinkCreate_v5050)(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); + typedef CUresult (CUDAAPI *PFN_cuLinkAddData_v5050)(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, unsigned int numOptions, CUjit_option *options, void **optionValues); + typedef CUresult (CUDAAPI *PFN_cuLinkAddFile_v5050)(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues); + typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v3020)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v2 *desc, CUdeviceptr_v2 dptr, size_t Pitch); + typedef CUresult (CUDAAPI *PFN_cuDeviceTotalMem_v2000)(unsigned int *bytes, CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v2000)(CUcontext *pctx, unsigned int flags, CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuModuleGetGlobal_v2000)(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name); + typedef CUresult (CUDAAPI *PFN_cuMemGetInfo_v2000)(unsigned int *free, unsigned int *total); + typedef CUresult (CUDAAPI *PFN_cuMemAlloc_v2000)(CUdeviceptr_v1 *dptr, unsigned int bytesize); + typedef CUresult (CUDAAPI *PFN_cuMemAllocPitch_v2000)(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes); + typedef CUresult (CUDAAPI *PFN_cuMemFree_v2000)(CUdeviceptr_v1 dptr); + typedef CUresult (CUDAAPI *PFN_cuMemGetAddressRange_v2000)(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr); + typedef CUresult (CUDAAPI 
*PFN_cuMemAllocHost_v2000)(void **pp, unsigned int bytesize); + typedef CUresult (CUDAAPI *PFN_cuMemHostGetDevicePointer_v2020)(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v2000)(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v2000)(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v2000)(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v2000)(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v2000)(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v2000)(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v2000)(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v2000)(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v2000)(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v2000)(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v2000)(const CUDA_MEMCPY2D_v1 *pCopy); + typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v2000)(const CUDA_MEMCPY2D_v1 *pCopy); + typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v2000)(const CUDA_MEMCPY3D_v1 *pCopy); + typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v2000)(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v2000)(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v3000)(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v2000)(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v2000)(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v2000)(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N); + typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v2000)(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N); + typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v2000)(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N); + typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height); + typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height); + typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height); + typedef CUresult (CUDAAPI *PFN_cuArrayCreate_v2000)(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray); + typedef CUresult (CUDAAPI *PFN_cuArrayGetDescriptor_v2000)(CUDA_ARRAY_DESCRIPTOR_v1 
*pArrayDescriptor, CUarray hArray); + typedef CUresult (CUDAAPI *PFN_cuArray3DCreate_v2000)(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray); + typedef CUresult (CUDAAPI *PFN_cuArray3DGetDescriptor_v2000)(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress_v2000)(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes); + typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v2020)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch); + typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddress_v2000)(CUdeviceptr_v1 *pdptr, CUtexref hTexRef); + typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedPointer_v3000)(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource); + typedef CUresult (CUDAAPI *PFN_cuCtxDestroy_v2000)(CUcontext ctx); + typedef CUresult (CUDAAPI *PFN_cuCtxPopCurrent_v2000)(CUcontext *pctx); + typedef CUresult (CUDAAPI *PFN_cuCtxPushCurrent_v2000)(CUcontext ctx); + typedef CUresult (CUDAAPI *PFN_cuStreamDestroy_v2000)(CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuEventDestroy_v2000)(CUevent hEvent); + typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRelease_v7000)(CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxReset_v7000)(CUdevice_v1 dev); + typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxSetFlags_v7000)(CUdevice_v1 dev, unsigned int flags); + typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10000)(CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10000_ptsz)(CUstream hStream); + typedef CUresult (CUDAAPI *PFN_cuIpcOpenMemHandle_v4010)(CUdeviceptr_v2 *pdptr, CUipcMemHandle_v1 handle, unsigned int Flags); + typedef CUresult (CUDAAPI *PFN_cuGraphInstantiate_v10000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + typedef CUresult (CUDAAPI *PFN_cuGraphInstantiate_v11000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); +#endif + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h new file mode 100644 index 0000000000000000000000000000000000000000..2bfd148632827d222548be49b3a2ffb7caa1c4dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h @@ -0,0 +1,90 @@ +/* + * Copyright 2020-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CUDAVDPAUTYPEDEFS_H +#define CUDAVDPAUTYPEDEFS_H + +// Dependent includes for cudavdpau.h +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + * Macros for the latest version for each driver function in cudaVDPAU.h + */ +#define PFN_cuVDPAUGetDevice PFN_cuVDPAUGetDevice_v3010 +#define PFN_cuVDPAUCtxCreate PFN_cuVDPAUCtxCreate_v3020 +#define PFN_cuGraphicsVDPAURegisterVideoSurface PFN_cuGraphicsVDPAURegisterVideoSurface_v3010 +#define PFN_cuGraphicsVDPAURegisterOutputSurface PFN_cuGraphicsVDPAURegisterOutputSurface_v3010 + + +/** + * Type definitions for functions defined in cudaVDPAU.h + */ +typedef CUresult (CUDAAPI *PFN_cuVDPAUGetDevice_v3010)(CUdevice_v1 *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3020)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterVideoSurface_v3010)(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags); +typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterOutputSurface_v3010)(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags); + +/* + * Type definitions for older versioned functions in cudaVDPAU.h + */ +#if defined(__CUDA_API_VERSION_INTERNAL) +typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3010)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress); +#endif + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // file guard diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h 
b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..7c58346fe78c59329aca138ebc92add9015c005c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h @@ -0,0 +1,365 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#ifndef _CUDA_AWBARRIER_HELPERS_H_ +#define _CUDA_AWBARRIER_HELPERS_H_ + +#define _CUDA_AWBARRIER_NAMESPACE nvcuda::experimental +#define _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace nvcuda { namespace experimental { +#define _CUDA_AWBARRIER_END_NAMESPACE } } + +#define _CUDA_AWBARRIER_INTERNAL_NAMESPACE _CUDA_AWBARRIER_NAMESPACE::__awbarrier_internal +#define _CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace __awbarrier_internal { +#define _CUDA_AWBARRIER_END_INTERNAL_NAMESPACE } _CUDA_AWBARRIER_END_NAMESPACE + +# if !defined(_CUDA_AWBARRIER_QUALIFIER) +# define _CUDA_AWBARRIER_QUALIFIER inline __device__ +# endif +# if !defined(_CUDA_AWBARRIER_STATIC_QUALIFIER) +# define _CUDA_AWBARRIER_STATIC_QUALIFIER static inline __device__ +#endif + +#if defined(__CUDA_ARCH__) +#if (__CUDA_ARCH__ >= 900) +# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_90 +#elif (__CUDA_ARCH__ >= 800) +# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_80 +#elif (__CUDA_ARCH__ >= 700) +# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70 +#endif +#else +# define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70 +#endif + +#define _CUDA_AWBARRIER_MAX_COUNT ((1 << 14) - 1) + +#if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900))) +# define _CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER +#endif + +#if !defined(_CUDA_AWBARRIER_DEBUG) +# if defined(__CUDACC_DEBUG__) +# define _CUDA_AWBARRIER_DEBUG 1 +# else +# define _CUDA_AWBARRIER_DEBUG 0 +# endif +#endif + +#if defined(_CUDA_AWBARRIER_DEBUG) && (_CUDA_AWBARRIER_DEBUG == 1) && !defined(NDEBUG) +# if !defined(__CUDACC_RTC__) +# include +# endif +# define _CUDA_AWBARRIER_ASSERT(x) assert((x)); +# define _CUDA_AWBARRIER_ABORT() assert(0); +#else +# define _CUDA_AWBARRIER_ASSERT(x) +# define _CUDA_AWBARRIER_ABORT() __trap(); +#endif + +#if defined(__CUDACC_RTC__) +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef uint64_t uintptr_t; +#else +# include +#endif + +// implicitly provided by NVRTC +#ifndef __CUDACC_RTC__ +#include +#endif /* !defined(__CUDACC_RTC__) */ + +typedef uint64_t __mbarrier_t; +typedef uint64_t __mbarrier_token_t; + +_CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE + +extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *); + +union AWBarrier { + struct { + uint32_t expected; + uint32_t pending; + } split; + uint64_t raw; +}; + +_CUDA_AWBARRIER_STATIC_QUALIFIER +void awbarrier_init(uint64_t* barrier, uint32_t expected_count) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count < (1 << 29)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + asm volatile ("mbarrier.init.shared.b64 [%0], %1;" + : + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(expected_count) + : "memory"); + return; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + AWBarrier* awbarrier = reinterpret_cast(barrier); + + awbarrier->split.expected = 0x40000000 - expected_count; + awbarrier->split.pending = 0x80000000 - expected_count; + return; + ) +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +void awbarrier_inval(uint64_t* barrier) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + asm volatile ("mbarrier.inval.shared.b64 [%0];" + : + : "r"(__nvvm_get_smem_pointer(barrier)) + : "memory"); + return; + ) + return; +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +uint32_t awbarrier_token_pending_count(uint64_t token) { + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint32_t __pending_count; + + asm 
("mbarrier.pending_count.b64 %0, %1;" + : "=r"(__pending_count) + : "l"(token)); + return __pending_count; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + const uint32_t pending = token >> 32; + return 0x80000000 - (pending & 0x7fffffff); + ) +} + +template +_CUDA_AWBARRIER_STATIC_QUALIFIER +uint64_t awbarrier_arrive_drop(uint64_t* barrier) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint64_t token; + + if (_Drop) { + asm volatile ("mbarrier.arrive_drop.shared.b64 %0, [%1];" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)) + : "memory"); + } else { + asm volatile ("mbarrier.arrive.shared.b64 %0, [%1];" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)) + : "memory"); + } + + return token; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + AWBarrier* awbarrier = reinterpret_cast(barrier); + + while ((*reinterpret_cast(&awbarrier->split.pending) & 0x7fffffff) == 0); + + if (_Drop) { + (void)atomicAdd_block(&awbarrier->split.expected, 1); + } + + __threadfence_block(); + + const uint32_t old_pending = atomicAdd_block(&awbarrier->split.pending, 1); + const uint32_t new_pending = old_pending + 1; + const bool reset = (old_pending ^ new_pending) & 0x80000000; + + if (reset) { + __threadfence_block(); + + uint32_t new_expected = *reinterpret_cast(&awbarrier->split.expected); + new_expected &= ~0x40000000; + if (new_expected & 0x20000000) { + new_expected |= 0x40000000; + } + atomicAdd_block(&awbarrier->split.pending, new_expected); + } + + return static_cast(old_pending) << 32; + ) +} + +template +_CUDA_AWBARRIER_STATIC_QUALIFIER +uint64_t awbarrier_arrive_drop_no_complete(uint64_t* barrier, uint32_t count) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + _CUDA_AWBARRIER_ASSERT(count > 0 && count < (1 << 29)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint64_t token; + + if (_Drop) { + asm volatile ("mbarrier.arrive_drop.noComplete.shared.b64 %0, [%1], %2;" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count) + : "memory"); + } else { + asm volatile ("mbarrier.arrive.noComplete.shared.b64 %0, [%1], %2;" + : "=l"(token) + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count) + : "memory"); + } + + return token; + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + AWBarrier* awbarrier = reinterpret_cast(barrier); + + while ((*reinterpret_cast(&awbarrier->split.pending) & 0x7fffffff) == 0); + + if (_Drop) { + (void)atomicAdd_block(&awbarrier->split.expected, count); + } + + return static_cast(atomicAdd_block(&awbarrier->split.pending, count)) << 32; + ) +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_test_wait(uint64_t* barrier, uint64_t token) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_80, + uint32_t __wait_complete; + + asm volatile ("{" + " .reg .pred %%p;" + " mbarrier.test_wait.shared.b64 %%p, [%1], %2;" + " selp.b32 %0, 1, 0, %%p;" + "}" + : "=r"(__wait_complete) + : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token) + : "memory"); + return bool(__wait_complete); + ) + NV_IF_TARGET(NV_PROVIDES_SM_70, + volatile AWBarrier* awbarrier = reinterpret_cast(barrier); + + return ((token >> 32) ^ awbarrier->split.pending) & 0x80000000; + ) +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_test_wait_parity(uint64_t* barrier, bool phase_parity) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_90, + uint32_t __wait_complete = 0; + + asm volatile ("{" + ".reg .pred %%p;" + "mbarrier.test_wait.parity.shared.b64 %%p, [%1], %2;" + "selp.b32 %0, 1, 0, %%p;" + "}" + : "=r"(__wait_complete) + 
: "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast(phase_parity)) + : "memory"); + + return __wait_complete; + ) + _CUDA_AWBARRIER_ABORT() + return false; +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_try_wait(uint64_t* barrier, uint64_t token, uint32_t max_sleep_nanosec) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_90, + uint32_t __wait_complete = 0; + + asm volatile ("{\n\t" + ".reg .pred p;\n\t" + "mbarrier.try_wait.shared.b64 p, [%1], %2, %3;\n\t" + "selp.b32 %0, 1, 0, p;\n\t" + "}" + : "=r"(__wait_complete) + : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token), "r"(max_sleep_nanosec) + : "memory"); + + return __wait_complete; + ) + _CUDA_AWBARRIER_ABORT() + return false; +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool awbarrier_try_wait_parity(uint64_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) { + _CUDA_AWBARRIER_ASSERT(__isShared(barrier)); + + NV_IF_TARGET(NV_PROVIDES_SM_90, + uint32_t __wait_complete = 0; + + asm volatile ("{\n\t" + ".reg .pred p;\n\t" + "mbarrier.try_wait.parity.shared.b64 p, [%1], %2, %3;\n\t" + "selp.b32 %0, 1, 0, p;\n\t" + "}" + : "=r"(__wait_complete) + : "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast(phase_parity)), "r"(max_sleep_nanosec) + : "memory"); + + return __wait_complete; + ) + _CUDA_AWBARRIER_ABORT() + return false; +} + +_CUDA_AWBARRIER_END_INTERNAL_NAMESPACE + +#endif /* !_CUDA_AWBARRIER_HELPERS_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h new file mode 100644 index 0000000000000000000000000000000000000000..5562ef3f6afeb7fce4bad4cb8067b3cb1b9a690f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h @@ -0,0 +1,109 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDA_AWBARRIER_PRIMITIVES_H_ +#define _CUDA_AWBARRIER_PRIMITIVES_H_ + +#include "cuda_awbarrier_helpers.h" + +#if !defined(_CUDA_AWBARRIER_SM_TARGET) +# error This file requires compute capability 7.0 or greater. +#endif + +_CUDA_AWBARRIER_STATIC_QUALIFIER __host__ +uint32_t __mbarrier_maximum_count() { + return _CUDA_AWBARRIER_MAX_COUNT; +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +void __mbarrier_init(__mbarrier_t* barrier, uint32_t expected_count) { + _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(barrier, expected_count); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +void __mbarrier_inval(__mbarrier_t* barrier) { + _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(barrier); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +__mbarrier_token_t __mbarrier_arrive(__mbarrier_t* barrier) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop(barrier); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +__mbarrier_token_t __mbarrier_arrive_and_drop(__mbarrier_t* barrier) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop(barrier); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool __mbarrier_test_wait(__mbarrier_t* barrier, __mbarrier_token_t token) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(barrier, token); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +uint32_t __mbarrier_token_pending_count(__mbarrier_token_t token) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(token); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool __mbarrier_test_wait_parity(__mbarrier_t* barrier, bool phase_parity) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(barrier, phase_parity); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool __mbarrier_try_wait(__mbarrier_t* barrier, __mbarrier_token_t token, uint32_t max_sleep_nanosec) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(barrier, token, max_sleep_nanosec); +} + +_CUDA_AWBARRIER_STATIC_QUALIFIER +bool __mbarrier_try_wait_parity(__mbarrier_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) { + return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(barrier, phase_parity, max_sleep_nanosec); +} + +#endif /* !_CUDA_AWBARRIER_PRIMITIVES_H_ */ diff --git 
a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.h new file mode 100644 index 0000000000000000000000000000000000000000..a4dd3038ee08927fdb56a6deaf1f5995620b56fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.h @@ -0,0 +1,3978 @@ +/* +* Copyright 1993-2022 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. +* +* Any use of the Licensed Deliverables in individual and commercial +* software must include, in the user documentation and internal +* comments to the code, the above Disclaimer and U.S. Government End +* Users Notice. +*/ + +/** +* \defgroup CUDA_MATH_INTRINSIC_BFLOAT16 Bfloat16 Precision Intrinsics +* This section describes nv_bfloat16 precision intrinsic functions that are +* only supported in device code. +* To use these functions, include the header file \p cuda_bf16.h in your program. +* The following macros are available to help users selectively enable/disable +* various definitions present in the header file: +* - \p CUDA_NO_BFLOAT16 - If defined, this macro will prevent the definition of +* additional type aliases in the global namespace, helping to avoid potential +* conflicts with symbols defined in the user program. 
+* - \p __CUDA_NO_BFLOAT16_CONVERSIONS__ - If defined, this macro will prevent +* the use of the C++ type conversions (converting constructors and conversion +* operators) that are common for built-in floating-point types, but may be +* undesirable for \p __nv_bfloat16 which is essentially a user-defined type. +* - \p __CUDA_NO_BFLOAT16_OPERATORS__ and \p __CUDA_NO_BFLOAT162_OPERATORS__ - +* If defined, these macros will prevent the inadvertent use of usual arithmetic +* and comparison operators. This enforces the storage-only type semantics and +* prevents C++ style computations on \p __nv_bfloat16 and \p __nv_bfloat162 types. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT16_ARITHMETIC Bfloat16 Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT162_ARITHMETIC Bfloat162 Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT16_COMPARISON Bfloat16 Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT162_COMPARISON Bfloat162 Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT16_MISC Bfloat16 Precision Conversion and Data Movement +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT16_FUNCTIONS Bfloat16 Math Functions +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__BFLOAT162_FUNCTIONS Bfloat162 Math Functions +* \ingroup CUDA_MATH_INTRINSIC_BFLOAT16 +* To use these functions, include the header file \p cuda_bf16.h in your program. +*/ + +#ifndef __CUDA_BF16_H__ +#define __CUDA_BF16_H__ + +#define ___CUDA_BF16_STRINGIFY_INNERMOST(x) #x +#define __CUDA_BF16_STRINGIFY(x) ___CUDA_BF16_STRINGIFY_INNERMOST(x) + +#if defined(__cplusplus) +#if defined(__CUDACC__) +#define __CUDA_BF16_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_BF16_DECL__ static __host__ __device__ __inline__ +#else +#define __CUDA_HOSTDEVICE_BF16_DECL__ static +#endif /* defined(__CUDACC__) */ + +#define __CUDA_BF16_TYPES_EXIST__ + +/* Forward-declaration of structures defined in "cuda_bf16.hpp" */ + +/** + * \brief nv_bfloat16 datatype + * + * \details This structure implements the datatype for storing + * nv_bfloat16 floating-point numbers. The structure implements + * assignment operators and type conversions. 16 bits are being + * used in total: 1 sign bit, 8 bits for the exponent, and + * the significand is being stored in 7 bits. The total + * precision is 8 bits. + * + */ +struct __nv_bfloat16; + +/** + * \brief nv_bfloat162 datatype + * + * \details This structure implements the datatype for storing two + * nv_bfloat16 floating-point numbers. + * The structure implements assignment operators and type conversions. + * + */ +struct __nv_bfloat162; + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts double number to nv_bfloat16 precision in round-to-nearest-even mode +* and returns \p nv_bfloat16 with converted value. 
+* +* \details Converts double number \p a to nv_bfloat16 precision in round-to-nearest-even mode. +* \param[in] a - double. Is only being read. +* \returns nv_bfloat16 +* - \p a converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __double2bfloat16(const double a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts float number to nv_bfloat16 precision in round-to-nearest-even mode +* and returns \p nv_bfloat16 with converted value. +* +* \details Converts float number \p a to nv_bfloat16 precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns nv_bfloat16 +* - \p a converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16(const float a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts float number to nv_bfloat16 precision in round-to-nearest-even mode +* and returns \p nv_bfloat16 with converted value. +* +* \details Converts float number \p a to nv_bfloat16 precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns nv_bfloat16 +* - \p a converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rn(const float a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts float number to nv_bfloat16 precision in round-towards-zero mode +* and returns \p nv_bfloat16 with converted value. +* +* \details Converts float number \p a to nv_bfloat16 precision in round-towards-zero mode. +* \param[in] a - float. Is only being read. +* \returns nv_bfloat16 +* - \p a converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rz(const float a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts float number to nv_bfloat16 precision in round-down mode +* and returns \p nv_bfloat16 with converted value. +* +* \details Converts float number \p a to nv_bfloat16 precision in round-down mode. +* \param[in] a - float. Is only being read. +* +* \returns nv_bfloat16 +* - \p a converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rd(const float a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts float number to nv_bfloat16 precision in round-up mode +* and returns \p nv_bfloat16 with converted value. +* +* \details Converts float number \p a to nv_bfloat16 precision in round-up mode. +* \param[in] a - float. Is only being read. +* +* \returns nv_bfloat16 +* - \p a converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_ru(const float a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts \p nv_bfloat16 number to float. +* +* \details Converts nv_bfloat16 number \p a to float. +* \param[in] a - float. Is only being read. +* +* \returns float +* - \p a converted to float. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ float __bfloat162float(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts input to nv_bfloat16 precision in round-to-nearest-even mode and +* populates both halves of \p nv_bfloat162 with converted value. +* +* \details Converts input \p a to nv_bfloat16 precision in round-to-nearest-even mode and +* populates both halves of \p nv_bfloat162 with converted value. +* \param[in] a - float. Is only being read. +* +* \returns nv_bfloat162 +* - The \p nv_bfloat162 value with both halves equal to the converted nv_bfloat16 +* precision number. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __float2bfloat162_rn(const float a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts both input floats to nv_bfloat16 precision in round-to-nearest-even +* mode and returns \p nv_bfloat162 with converted values. +* +* \details Converts both input floats to nv_bfloat16 precision in round-to-nearest-even mode +* and combines the results into one \p nv_bfloat162 number. Low 16 bits of the return +* value correspond to the input \p a, high 16 bits correspond to the input \p +* b. +* \param[in] a - float. Is only being read. +* \param[in] b - float. Is only being read. +* +* \returns nv_bfloat162 +* - The \p nv_bfloat162 value with corresponding halves equal to the +* converted input floats. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __floats2bfloat162_rn(const float a, const float b); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts low 16 bits of \p nv_bfloat162 to float and returns the result +* +* \details Converts low 16 bits of \p nv_bfloat162 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns float +* - The low 16 bits of \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ float __low2float(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts high 16 bits of \p nv_bfloat162 to float and returns the result +* +* \details Converts high 16 bits of \p nv_bfloat162 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns float +* - The high 16 bits of \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ float __high2float(const __nv_bfloat162 a); + +#if defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) || defined(_NVHPC_CUDA)) +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts both components of float2 number to nv_bfloat16 precision in +* round-to-nearest-even mode and returns \p nv_bfloat162 with converted values. +* +* \details Converts both components of float2 to nv_bfloat16 precision in round-to-nearest +* mode and combines the results into one \p nv_bfloat162 number. Low 16 bits of the +* return value correspond to \p a.x and high 16 bits of the return value +* correspond to \p a.y. +* \param[in] a - float2. Is only being read. 
+* +* \returns nv_bfloat162 +* - The \p nv_bfloat162 which has corresponding halves equal to the +* converted float2 components. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __float22bfloat162_rn(const float2 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Converts both halves of \p nv_bfloat162 to float2 and returns the result. +* +* \details Converts both halves of \p nv_bfloat162 input \p a to float2 and returns the +* result. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns float2 +* - \p a converted to float2. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ float2 __bfloat1622float2(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed integer in round-to-nearest-even mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed integer in +* round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ int __bfloat162int_rn(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed integer in round-towards-zero mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed integer in +* round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ int __bfloat162int_rz(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed integer in round-down mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed integer in +* round-down mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ int __bfloat162int_rd(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed integer in round-up mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed integer in +* round-up mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ int __bfloat162int_ru(const __nv_bfloat16 h); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed integer to a nv_bfloat16 in round-to-nearest-even mode. +* +* \details Convert the signed integer value \p i to a nv_bfloat16 floating-point +* value in round-to-nearest-even mode. +* \param[in] i - int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rn(const int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed integer to a nv_bfloat16 in round-towards-zero mode. +* +* \details Convert the signed integer value \p i to a nv_bfloat16 floating-point +* value in round-towards-zero mode. +* \param[in] i - int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rz(const int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed integer to a nv_bfloat16 in round-down mode. +* +* \details Convert the signed integer value \p i to a nv_bfloat16 floating-point +* value in round-down mode. +* \param[in] i - int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rd(const int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed integer to a nv_bfloat16 in round-up mode. +* +* \details Convert the signed integer value \p i to a nv_bfloat16 floating-point +* value in round-up mode. +* \param[in] i - int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_ru(const int i); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed short integer in round-to-nearest-even +* mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed short +* integer in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ short int __bfloat162short_rn(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed short integer in round-towards-zero mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed short +* integer in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ short int __bfloat162short_rz(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed short integer in round-down mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed short +* integer in round-down mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ short int __bfloat162short_rd(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed short integer in round-up mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed short +* integer in round-up mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ short int __bfloat162short_ru(const __nv_bfloat16 h); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed short integer to a nv_bfloat16 in round-to-nearest-even +* mode. +* +* \details Convert the signed short integer value \p i to a nv_bfloat16 floating-point +* value in round-to-nearest-even mode. +* \param[in] i - short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rn(const short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed short integer to a nv_bfloat16 in round-towards-zero mode. +* +* \details Convert the signed short integer value \p i to a nv_bfloat16 floating-point +* value in round-towards-zero mode. +* \param[in] i - short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rz(const short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed short integer to a nv_bfloat16 in round-down mode. +* +* \details Convert the signed short integer value \p i to a nv_bfloat16 floating-point +* value in round-down mode. +* \param[in] i - short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rd(const short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed short integer to a nv_bfloat16 in round-up mode. +* +* \details Convert the signed short integer value \p i to a nv_bfloat16 floating-point +* value in round-up mode. +* \param[in] i - short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_ru(const short int i); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned integer in round-to-nearest-even mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned integer +* in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_rn(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned integer in round-towards-zero mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned integer +* in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __bfloat162uint_rz(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned integer in round-down mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned integer +* in round-down mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_rd(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned integer in round-up mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned integer +* in round-up mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_ru(const __nv_bfloat16 h); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned integer to a nv_bfloat16 in round-to-nearest-even mode. +* +* \details Convert the unsigned integer value \p i to a nv_bfloat16 floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rn(const unsigned int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned integer to a nv_bfloat16 in round-towards-zero mode. +* +* \details Convert the unsigned integer value \p i to a nv_bfloat16 floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rz(const unsigned int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned integer to a nv_bfloat16 in round-down mode. +* +* \details Convert the unsigned integer value \p i to a nv_bfloat16 floating-point +* value in round-down mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. 
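+*
+* The NaN handling stated above for the bfloat16-to-unsigned-integer converters
+* can be observed with a small device-side sketch; it builds a quiet NaN through
+* __ushort_as_bfloat16, which is declared further down in this header:
+* \code
+* __global__ void nan_to_zero_demo(unsigned int *out)
+* {
+*     // 0x7FC0 is a quiet-NaN bit pattern in the bfloat16 encoding.
+*     const __nv_bfloat16 qnan = __ushort_as_bfloat16(0x7FC0U);
+*     out[0] = __bfloat162uint_rz(qnan);  // 0, as documented: NaN inputs convert to 0
+* }
+* \endcode
+*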
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rd(const unsigned int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned integer to a nv_bfloat16 in round-up mode. +* +* \details Convert the unsigned integer value \p i to a nv_bfloat16 floating-point +* value in round-up mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_ru(const unsigned int i); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned short integer in round-to-nearest-even +* mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned short +* integer in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_rn(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned short integer in round-towards-zero +* mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned short +* integer in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned short int __bfloat162ushort_rz(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned short integer in round-down mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned short +* integer in round-down mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +*/ +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_rd(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned short integer in round-up mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned short +* integer in round-up mode. NaN inputs are converted to 0. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +*/ +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_ru(const __nv_bfloat16 h); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned short integer to a nv_bfloat16 in round-to-nearest-even +* mode. +* +* \details Convert the unsigned short integer value \p i to a nv_bfloat16 floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. 
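+*
+* Note that, in the declarations above, the integer-to-bfloat16 converters carry
+* __CUDA_HOSTDEVICE_BF16_DECL__ only on their _rn variants and the
+* bfloat16-to-integer converters only on their _rz variants; the remaining
+* rounding modes use __CUDA_BF16_DECL__ and are intended for device code. A
+* minimal host-side sketch using the two host-and-device variants (assuming those
+* declarations expand to host-callable definitions in the including translation unit):
+* \code
+* void roundtrip_host(const unsigned short *src, unsigned short *dst, int n)
+* {
+*     for (int k = 0; k < n; ++k) {
+*         const __nv_bfloat16 b = __ushort2bfloat16_rn(src[k]);
+*         // Values above 256 may round during the conversion to bfloat16,
+*         // so dst[k] is not guaranteed to equal src[k].
+*         dst[k] = __bfloat162ushort_rz(b);
+*     }
+* }
+* \endcode
+*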
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rn(const unsigned short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned short integer to a nv_bfloat16 in round-towards-zero +* mode. +* +* \details Convert the unsigned short integer value \p i to a nv_bfloat16 floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rz(const unsigned short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned short integer to a nv_bfloat16 in round-down mode. +* +* \details Convert the unsigned short integer value \p i to a nv_bfloat16 floating-point +* value in round-down mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rd(const unsigned short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned short integer to a nv_bfloat16 in round-up mode. +* +* \details Convert the unsigned short integer value \p i to a nv_bfloat16 floating-point +* value in round-up mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_ru(const unsigned short int i); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned 64-bit +* integer in round-to-nearest-even mode. NaN inputs return 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_rn(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned 64-bit integer in round-towards-zero +* mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned 64-bit +* integer in round-towards-zero mode. NaN inputs return 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned long long int __bfloat162ull_rz(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned 64-bit integer in round-down mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned 64-bit +* integer in round-down mode. NaN inputs return 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. 
+* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_rd(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to an unsigned 64-bit integer in round-up mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to an unsigned 64-bit +* integer in round-up mode. NaN inputs return 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_ru(const __nv_bfloat16 h); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned 64-bit integer to a nv_bfloat16 in round-to-nearest-even +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a nv_bfloat16 floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rn(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned 64-bit integer to a nv_bfloat16 in round-towards-zero +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a nv_bfloat16 floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rz(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned 64-bit integer to a nv_bfloat16 in round-down mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a nv_bfloat16 floating-point +* value in round-down mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rd(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert an unsigned 64-bit integer to a nv_bfloat16 in round-up mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a nv_bfloat16 floating-point +* value in round-up mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_ru(const unsigned long long int i); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed 64-bit +* integer in round-to-nearest-even mode. 
NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ long long int __bfloat162ll_rn(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed 64-bit integer in round-towards-zero mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed 64-bit +* integer in round-towards-zero mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ long long int __bfloat162ll_rz(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed 64-bit integer in round-down mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed 64-bit +* integer in round-down mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ long long int __bfloat162ll_rd(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a nv_bfloat16 to a signed 64-bit integer in round-up mode. +* +* \details Convert the nv_bfloat16 floating-point value \p h to a signed 64-bit +* integer in round-up mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ long long int __bfloat162ll_ru(const __nv_bfloat16 h); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed 64-bit integer to a nv_bfloat16 in round-to-nearest-even +* mode. +* +* \details Convert the signed 64-bit integer value \p i to a nv_bfloat16 floating-point +* value in round-to-nearest-even mode. +* \param[in] i - long long int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rn(const long long int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed 64-bit integer to a nv_bfloat16 in round-towards-zero mode. +* +* \details Convert the signed 64-bit integer value \p i to a nv_bfloat16 floating-point +* value in round-towards-zero mode. +* \param[in] i - long long int. Is only being read. +* +* \returns nv_bfloat16 +* - \p i converted to nv_bfloat16. +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rz(const long long int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Convert a signed 64-bit integer to a nv_bfloat16 in round-down mode. 
+*
+* \details Convert the signed 64-bit integer value \p i to a nv_bfloat16 floating-point
+* value in round-down mode.
+* \param[in] i - long long int. Is only being read.
+*
+* \returns nv_bfloat16
+* - \p i converted to nv_bfloat16.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rd(const long long int i);
+/**
+* \ingroup CUDA_MATH__BFLOAT16_MISC
+* \brief Convert a signed 64-bit integer to a nv_bfloat16 in round-up mode.
+*
+* \details Convert the signed 64-bit integer value \p i to a nv_bfloat16 floating-point
+* value in round-up mode.
+* \param[in] i - long long int. Is only being read.
+*
+* \returns nv_bfloat16
+* - \p i converted to nv_bfloat16.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_ru(const long long int i);
+
+/**
+* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS
+* \brief Truncate input argument to the integral part.
+*
+* \details Round \p h to the nearest integer value that does not exceed \p h in
+* magnitude.
+* \param[in] h - nv_bfloat16. Is only being read.
+*
+* \returns nv_bfloat16
+* - The truncated integer value.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat16 htrunc(const __nv_bfloat16 h);
+/**
+* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS
+* \brief Calculate ceiling of the input argument.
+*
+* \details Compute the smallest integer value not less than \p h.
+* \param[in] h - nv_bfloat16. Is only being read.
+*
+* \returns nv_bfloat16
+* - The smallest integer value not less than \p h.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat16 hceil(const __nv_bfloat16 h);
+/**
+* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS
+* \brief Calculate the largest integer less than or equal to \p h.
+*
+* \details Calculate the largest integer value which is less than or equal to \p h.
+* \param[in] h - nv_bfloat16. Is only being read.
+*
+* \returns nv_bfloat16
+* - The largest integer value which is less than or equal to \p h.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat16 hfloor(const __nv_bfloat16 h);
+/**
+* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS
+* \brief Round input to nearest integer value in nv_bfloat16 floating-point
+* number.
+*
+* \details Round \p h to the nearest integer value in nv_bfloat16 floating-point
+* format, with halfway cases rounded to the nearest even integer value.
+* \param[in] h - nv_bfloat16. Is only being read.
+*
+* \returns nv_bfloat16
+* - The nearest integer to \p h.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat16 hrint(const __nv_bfloat16 h);
+
+/**
+* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS
+* \brief Truncate \p nv_bfloat162 vector input argument to the integral part.
+*
+* \details Round each component of vector \p h to the nearest integer value that does
+* not exceed \p h in magnitude.
+* \param[in] h - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The truncated \p h.
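+*
+* A minimal device-side sketch contrasting the scalar rounding helpers documented
+* above; it uses __float2bfloat16, declared elsewhere in this header, to build the input:
+* \code
+* __global__ void bf16_rounding_demo(__nv_bfloat16 *out)
+* {
+*     const __nv_bfloat16 x = __float2bfloat16(2.5f);  // exactly representable in bfloat16
+*     out[0] = htrunc(x);  // 2.0
+*     out[1] = hceil(x);   // 3.0
+*     out[2] = hfloor(x);  // 2.0
+*     out[3] = hrint(x);   // 2.0 (the tie rounds to the even integer)
+* }
+* \endcode
+*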
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 h2trunc(const __nv_bfloat162 h);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS
+* \brief Calculate \p nv_bfloat162 vector ceiling of the input argument.
+*
+* \details For each component of vector \p h compute the smallest integer value not less
+* than \p h.
+* \param[in] h - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The vector of smallest integers not less than \p h.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 h2ceil(const __nv_bfloat162 h);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS
+* \brief Calculate the largest integer less than or equal to \p h.
+*
+* \details For each component of vector \p h calculate the largest integer value which
+* is less than or equal to \p h.
+* \param[in] h - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The vector of largest integers which is less than or equal to \p h.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 h2floor(const __nv_bfloat162 h);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS
+* \brief Round input to nearest integer value in nv_bfloat16 floating-point
+* number.
+*
+* \details Round each component of \p nv_bfloat162 vector \p h to the nearest integer value in
+* nv_bfloat16 floating-point format, with halfway cases rounded to the
+* nearest even integer value.
+* \param[in] h - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The vector of rounded integer values.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 h2rint(const __nv_bfloat162 h);
+
+/**
+* \ingroup CUDA_MATH__BFLOAT16_MISC
+* \brief Returns \p nv_bfloat162 with both halves equal to the input value.
+*
+* \details Returns \p nv_bfloat162 number with both halves equal to the input \p a \p nv_bfloat16
+* number.
+* \param[in] a - nv_bfloat16. Is only being read.
+*
+* \returns nv_bfloat162
+* - The vector which has both its halves equal to the input \p a.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __bfloat162bfloat162(const __nv_bfloat16 a);
+/**
+* \ingroup CUDA_MATH__BFLOAT16_MISC
+* \brief Swaps both halves of the \p nv_bfloat162 input.
+*
+* \details Swaps both halves of the \p nv_bfloat162 input and returns a new \p nv_bfloat162 number
+* with swapped halves.
+* \param[in] a - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - \p a with its halves being swapped.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __lowhigh2highlow(const __nv_bfloat162 a);
+/**
+* \ingroup CUDA_MATH__BFLOAT16_MISC
+* \brief Extracts low 16 bits from each of the two \p nv_bfloat162 inputs and combines
+* into one \p nv_bfloat162 number.
+*
+* \details Extracts low 16 bits from each of the two \p nv_bfloat162 inputs and combines into
+* one \p nv_bfloat162 number. Low 16 bits from input \p a is stored in low 16 bits of
+* the return value, low 16 bits from input \p b is stored in high 16 bits of
+* the return value.
+* \param[in] a - nv_bfloat162. Is only being read.
+* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The low 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __lows2bfloat162(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Extracts high 16 bits from each of the two \p nv_bfloat162 inputs and +* combines into one \p nv_bfloat162 number. +* +* \details Extracts high 16 bits from each of the two \p nv_bfloat162 inputs and combines into +* one \p nv_bfloat162 number. High 16 bits from input \p a is stored in low 16 bits of +* the return value, high 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The high 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __highs2bfloat162(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Returns high 16 bits of \p nv_bfloat162 input. +* +* \details Returns high 16 bits of \p nv_bfloat162 input \p a. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat16 +* - The high 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __high2bfloat16(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Returns low 16 bits of \p nv_bfloat162 input. +* +* \details Returns low 16 bits of \p nv_bfloat162 input \p a. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat16 +* - Returns \p nv_bfloat16 which contains low 16 bits of the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __low2bfloat16(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Checks if the input \p nv_bfloat16 number is infinite. +* +* \details Checks if the input \p nv_bfloat16 number \p a is infinite. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns int +* - -1 iff \p a is equal to negative infinity, +* - 1 iff \p a is equal to positive infinity, +* - 0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ int __hisinf(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Combines two \p nv_bfloat16 numbers into one \p nv_bfloat162 number. +* +* \details Combines two input \p nv_bfloat16 number \p a and \p b into one \p nv_bfloat162 number. +* Input \p a is stored in low 16 bits of the return value, input \p b is stored +* in high 16 bits of the return value. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat162 +* - The nv_bfloat162 with one nv_bfloat16 equal to \p a and the other to \p b. 
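+*
+* The packing and unpacking helpers above compose naturally; a minimal device-side
+* sketch:
+* \code
+* __global__ void pack_demo(const __nv_bfloat16 a, const __nv_bfloat16 b,
+*                           __nv_bfloat16 *out)
+* {
+*     const __nv_bfloat162 v = __halves2bfloat162(a, b);  // low = a, high = b
+*     const __nv_bfloat162 s = __lowhigh2highlow(v);      // low = b, high = a
+*     out[0] = __low2bfloat16(s);   // b
+*     out[1] = __high2bfloat16(s);  // a
+* }
+* \endcode
+*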
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __halves2bfloat162(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Extracts low 16 bits from \p nv_bfloat162 input. +* +* \details Extracts low 16 bits from \p nv_bfloat162 input \p a and returns a new \p nv_bfloat162 +* number which has both halves equal to the extracted bits. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The nv_bfloat162 with both halves equal to the low 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __low2bfloat162(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Extracts high 16 bits from \p nv_bfloat162 input. +* +* \details Extracts high 16 bits from \p nv_bfloat162 input \p a and returns a new \p nv_bfloat162 +* number which has both halves equal to the extracted bits. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The nv_bfloat162 with both halves equal to the high 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __high2bfloat162(const __nv_bfloat162 a); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Reinterprets bits in a \p nv_bfloat16 as a signed short integer. +* +* \details Reinterprets the bits in the nv_bfloat16 floating-point number \p h +* as a signed short integer. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns short int +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ short int __bfloat16_as_short(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Reinterprets bits in a \p nv_bfloat16 as an unsigned short integer. +* +* \details Reinterprets the bits in the nv_bfloat16 floating-point \p h +* as an unsigned short number. +* \param[in] h - nv_bfloat16. Is only being read. +* +* \returns unsigned short int +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned short int __bfloat16_as_ushort(const __nv_bfloat16 h); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Reinterprets bits in a signed short integer as a \p nv_bfloat16. +* +* \details Reinterprets the bits in the signed short integer \p i as a +* nv_bfloat16 floating-point number. +* \param[in] i - short int. Is only being read. +* +* \returns nv_bfloat16 +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __short_as_bfloat16(const short int i); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Reinterprets bits in an unsigned short integer as a \p nv_bfloat16. +* +* \details Reinterprets the bits in the unsigned short integer \p i as a +* nv_bfloat16 floating-point number. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns nv_bfloat16 +* - The reinterpreted value. 
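+*
+* Because these reinterpretation helpers expose the raw 16-bit encoding, simple
+* bit manipulations become possible; a minimal device-side sketch that flips the
+* sign bit of a value:
+* \code
+* __device__ __nv_bfloat16 flip_sign(const __nv_bfloat16 h)
+* {
+*     // Bit 15 is the sign bit of the bfloat16 encoding.
+*     const unsigned short bits = __bfloat16_as_ushort(h);
+*     return __ushort_as_bfloat16((unsigned short)(bits ^ 0x8000U));
+* }
+* \endcode
+*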
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort_as_bfloat16(const unsigned short int i); + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat162. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as nv_bfloat162. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_sync(const unsigned mask, const __nv_bfloat162 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat162. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as nv_bfloat162. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_up_sync(const unsigned mask, const __nv_bfloat162 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. 
+* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat162. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as nv_bfloat162. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_down_sync(const unsigned mask, const __nv_bfloat162 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat162. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as nv_bfloat162. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_xor_sync(const unsigned mask, const __nv_bfloat162 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat16. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as nv_bfloat16. 
+* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_sync(const unsigned mask, const __nv_bfloat16 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat16. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as nv_bfloat16. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_up_sync(const unsigned mask, const __nv_bfloat16 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat16. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as nv_bfloat16. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_down_sync(const unsigned mask, const __nv_bfloat16 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. 
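+*
+* The bfloat16 shuffle overloads above support the usual warp-reduction idiom.
+* A minimal device-side sketch, assuming a full 32-lane active mask and __hadd,
+* the bfloat16 addition intrinsic declared elsewhere in this header:
+* \code
+* __device__ __nv_bfloat16 warp_sum_bf16(__nv_bfloat16 v)
+* {
+*     // Halve the shuffle distance each step; after the last step lane 0
+*     // holds the sum of all 32 lanes of the warp.
+*     for (int offset = 16; offset > 0; offset >>= 1) {
+*         v = __hadd(v, __shfl_down_sync(0xFFFFFFFFU, v, offset));
+*     }
+*     return v;
+* }
+* \endcode
+*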
+* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - nv_bfloat16. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as nv_bfloat16. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_xor_sync(const unsigned mask, const __nv_bfloat16 var, const int delta, const int width = warpSize); + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldg(const __nv_bfloat162 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ldg(const __nv_bfloat16 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcg(const __nv_bfloat162 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcg(const __nv_bfloat16 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldca(const __nv_bfloat162 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ldca(const __nv_bfloat16 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcs(const __nv_bfloat162 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcs(const __nv_bfloat16 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.lu` load instruction. 
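+*
+* These cache-hint loads (together with the matching stores documented just below)
+* slot into ordinary element-wise kernels; a minimal sketch using the read-only
+* __ldg path:
+* \code
+* __global__ void swap_halves(const __nv_bfloat162 *in, __nv_bfloat162 *out, int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n) {
+*         // `in` is never written by this kernel, so the read-only
+*         // ld.global.nc path generated by __ldg is appropriate here.
+*         const __nv_bfloat162 v = __ldg(in + i);
+*         out[i] = __lowhigh2highlow(v);
+*     }
+* }
+* \endcode
+*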
+* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldlu(const __nv_bfloat162 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ldlu(const __nv_bfloat16 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcv(const __nv_bfloat162 *const ptr); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcv(const __nv_bfloat16 *const ptr); + +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.wb` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stwb(__nv_bfloat162 *const ptr, const __nv_bfloat162 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.wb` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stwb(__nv_bfloat16 *const ptr, const __nv_bfloat16 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stcg(__nv_bfloat162 *const ptr, const __nv_bfloat162 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stcg(__nv_bfloat16 *const ptr, const __nv_bfloat16 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stcs(__nv_bfloat162 *const ptr, const __nv_bfloat162 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stcs(__nv_bfloat16 *const ptr, const __nv_bfloat16 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stwt(__nv_bfloat162 *const ptr, const __nv_bfloat162 value); +/** +* \ingroup CUDA_MATH__BFLOAT16_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_BF16_DECL__ void __stwt(__nv_bfloat16 *const ptr, const __nv_bfloat16 value); + +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs nv_bfloat162 vector if-equal comparison. +* +* \details Performs \p nv_bfloat162 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. 
+* +* \returns nv_bfloat162 +* - The vector result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __heq2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector not-equal comparison. +* +* \details Performs \p nv_bfloat162 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hne2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector less-equal comparison. +* +* \details Performs \p nv_bfloat162 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The \p nv_bfloat162 result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hle2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector greater-equal comparison. +* +* \details Performs \p nv_bfloat162 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hge2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector less-than comparison. +* +* \details Performs \p nv_bfloat162 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The nv_bfloat162 vector result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hlt2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector greater-than comparison. +* +* \details Performs \p nv_bfloat162 vector greater-than comparison of inputs \p a and \p b. 
+* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hgt2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered if-equal comparison. +* +* \details Performs \p nv_bfloat162 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hequ2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered not-equal comparison. +* +* \details Performs \p nv_bfloat162 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hneu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered less-equal comparison. +* +* Performs \p nv_bfloat162 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hleu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered greater-equal comparison. +* +* \details Performs \p nv_bfloat162 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The \p nv_bfloat162 vector result of unordered greater-equal comparison of vectors \p a and \p b. 
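+*
+* The practical difference between the ordered and unordered comparisons above is
+* only visible for NaN operands; a minimal device-side sketch:
+* \code
+* __global__ void cmp_demo(const __nv_bfloat162 a, const __nv_bfloat162 b,
+*                          __nv_bfloat162 *out)
+* {
+*     out[0] = __hlt2(a, b);   // per-half 1.0/0.0; a NaN in either half gives 0.0
+*     out[1] = __hltu2(a, b);  // per-half 1.0/0.0; a NaN in either half gives 1.0
+* }
+* \endcode
+*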
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hgeu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered less-than comparison. +* +* \details Performs \p nv_bfloat162 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The vector result of unordered less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hltu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered greater-than comparison. +* +* \details Performs \p nv_bfloat162 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p nv_bfloat16 results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The \p nv_bfloat162 vector result of unordered greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hgtu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs nv_bfloat162 vector if-equal comparison. +* +* \details Performs \p nv_bfloat162 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __heq2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector not-equal comparison. +* +* \details Performs \p nv_bfloat162 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hne2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector less-equal comparison. +* +* \details Performs \p nv_bfloat162 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. 
+* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hle2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector greater-equal comparison. +* +* \details Performs \p nv_bfloat162 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hge2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector less-than comparison. +* +* \details Performs \p nv_bfloat162 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hlt2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector greater-than comparison. +* +* \details Performs \p nv_bfloat162 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hgt2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered if-equal comparison. +* +* \details Performs \p nv_bfloat162 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hequ2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered not-equal comparison. 
+* +* \details Performs \p nv_bfloat162 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hneu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered less-equal comparison. +* +* Performs \p nv_bfloat162 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hleu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered greater-equal comparison. +* +* \details Performs \p nv_bfloat162 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hgeu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered less-than comparison. +* +* \details Performs \p nv_bfloat162 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hltu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered greater-than comparison. +* +* \details Performs \p nv_bfloat162 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered greater-than comparison of vectors \p a and \p b. 
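+*
+* Illustrative sketch (not from the original header) contrasting the ordered
+* and unordered mask forms on NaN inputs; function and parameter names are
+* hypothetical.
+* \code
+* __device__ void gt_masks(__nv_bfloat162 a, __nv_bfloat162 b,
+*                          unsigned *ordered, unsigned *unordered)
+* {
+*     // For a lane in which a or b is NaN, the ordered comparison yields 0x0000
+*     // while the unordered comparison yields 0xFFFF.
+*     *ordered   = __hgt2_mask(a, b);
+*     *unordered = __hgtu2_mask(a, b);
+* }
+* \endcode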
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ unsigned __hgtu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Determine whether \p nv_bfloat162 argument is a NaN. +* +* \details Determine whether each nv_bfloat16 of input \p nv_bfloat162 number \p a is a NaN. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The nv_bfloat162 with the corresponding \p nv_bfloat16 results set to +* 1.0 for NaN, 0.0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hisnan2(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector addition in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat162 vector add of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The sum of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p nv_bfloat162 input vector \p b from input vector \p a in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-104 +* \endinternal +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The subtraction of vector \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector multiplication in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat162 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-102 +* \endinternal +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise multiplying the vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector addition in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat162 vector add of inputs \p a and \p b, in round-to-nearest +* mode. Prevents floating-point contractions of mul+add into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The sum of vectors \p a and \p b. 
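+*
+* Illustrative sketch (not from the original header): the _rn variants keep the
+* multiply and the add separately rounded, whereas __hfma2 rounds once; the
+* wrapper names are hypothetical.
+* \code
+* __device__ __nv_bfloat162 mul_add_two_roundings(__nv_bfloat162 a,
+*                                                 __nv_bfloat162 b,
+*                                                 __nv_bfloat162 c)
+* {
+*     // Documented to prevent contraction into a fused multiply-add.
+*     return __hadd2_rn(__hmul2_rn(a, b), c);
+* }
+* __device__ __nv_bfloat162 mul_add_one_rounding(__nv_bfloat162 a,
+*                                                __nv_bfloat162 b,
+*                                                __nv_bfloat162 c)
+* {
+*     // Explicit fused multiply-add: a*b + c rounded once per half.
+*     return __hfma2(a, b, c);
+* }
+* \endcode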
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC
+* \brief Performs \p nv_bfloat162 vector subtraction in round-to-nearest-even mode.
+*
+* \details Subtracts \p nv_bfloat162 input vector \p b from input vector \p a in
+* round-to-nearest-even mode. Prevents floating-point contractions of mul+sub into fma.
+* \internal
+* \req DEEPLEARN-SRM_REQ-104
+* \endinternal
+* \param[in] a - nv_bfloat162. Is only being read.
+* \param[in] b - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The subtraction of vector \p b from \p a.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC
+* \brief Performs \p nv_bfloat162 vector multiplication in round-to-nearest-even mode.
+*
+* \details Performs \p nv_bfloat162 vector multiplication of inputs \p a and \p b, in
+* round-to-nearest-even mode. Prevents floating-point contractions of mul+add
+* or sub into fma.
+* \internal
+* \req DEEPLEARN-SRM_REQ-102
+* \endinternal
+* \param[in] a - nv_bfloat162. Is only being read.
+* \param[in] b - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The result of elementwise multiplying the vectors \p a and \p b.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC
+* \brief Performs \p nv_bfloat162 vector division in round-to-nearest-even mode.
+*
+* \details Divides \p nv_bfloat162 input vector \p a by input vector \p b in round-to-nearest
+* mode.
+* \internal
+* \req DEEPLEARN-SRM_REQ-103
+* \endinternal
+* \param[in] a - nv_bfloat162. Is only being read.
+* \param[in] b - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - The elementwise division of \p a with \p b.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __h2div(const __nv_bfloat162 a, const __nv_bfloat162 b);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC
+* \brief Calculates the absolute value of both halves of the input \p nv_bfloat162 number and
+* returns the result.
+*
+* \details Calculates the absolute value of both halves of the input \p nv_bfloat162 number and
+* returns the result.
+* \param[in] a - nv_bfloat162. Is only being read.
+*
+* \returns nv_bfloat162
+* - Returns \p a with the absolute value of both halves.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_BF16_DECL__ __nv_bfloat162 __habs2(const __nv_bfloat162 a);
+/**
+* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC
+* \brief Performs \p nv_bfloat162 vector addition in round-to-nearest-even mode, with
+* saturation to [0.0, 1.0].
+*
+* \details Performs \p nv_bfloat162 vector add of inputs \p a and \p b, in round-to-nearest
+* mode, and clamps the results to range [0.0, 1.0]. NaN results are flushed to
+* +0.0.
+* \param[in] a - nv_bfloat162. Is only being read.
+* \param[in] b - nv_bfloat162. Is only being read.
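+*
+* Illustrative sketch (not from the original header); the function name is
+* hypothetical.
+* \code
+* __device__ __nv_bfloat162 add_probabilities(__nv_bfloat162 p, __nv_bfloat162 q)
+* {
+*     // Each per-half sum is clamped to [0.0, 1.0]; NaN results become +0.0.
+*     return __hadd2_sat(p, q);
+* }
+* \endcode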
+* +* \returns nv_bfloat162 +* - The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector subtraction in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Subtracts \p nv_bfloat162 input vector \p b from input vector \p a in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The subtraction of vector \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector multiplication in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p nv_bfloat162 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise multiplication of vectors \p a and \p b, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector fused multiply-add in round-to-nearest-even +* mode. +* +* \details Performs \p nv_bfloat162 vector multiply on inputs \p a and \p b, +* then performs a \p nv_bfloat162 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-105 +* \endinternal +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* \param[in] c - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector fused multiply-add in round-to-nearest-even +* mode, with saturation to [0.0, 1.0]. +* +* \details Performs \p nv_bfloat162 vector multiply on inputs \p a and \p b, +* then performs a \p nv_bfloat162 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the +* results to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* \param[in] c - nv_bfloat162. Is only being read. 
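+*
+* Illustrative sketch (not from the original header): a hypothetical elementwise
+* kernel that uses the saturating fused multiply-add; names and launch
+* configuration are assumptions.
+* \code
+* __global__ void lerp_saturate(const __nv_bfloat162 *x, const __nv_bfloat162 *y,
+*                               const __nv_bfloat162 *t, __nv_bfloat162 *out, int n)
+* {
+*     int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n) {
+*         // out = t*x + y, rounded once per half and clamped to [0.0, 1.0].
+*         out[i] = __hfma2_sat(t[i], x[i], y[i]);
+*     }
+* }
+* \endcode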
+* +* \returns nv_bfloat162 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Negates both halves of the input \p nv_bfloat162 number and returns the +* result. +* +* \details Negates both halves of the input \p nv_bfloat162 number \p a and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-101 +* \endinternal +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - Returns \p a with both halves negated. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hneg2(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Calculates the absolute value of input \p nv_bfloat16 number and returns the result. +* +* \details Calculates the absolute value of input \p nv_bfloat16 number and returns the result. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The absolute value of a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __habs(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 addition in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat16 addition of inputs \p a and \p b, in round-to-nearest-even +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p nv_bfloat16 input \p b from input \p a in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 multiplication in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat16 multiplication of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of multiplying \p a and \p b. +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 addition in round-to-nearest-even mode. 
+* +* \details Performs \p nv_bfloat16 addition of inputs \p a and \p b, in round-to-nearest-even +* mode. Prevents floating-point contractions of mul+add into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd_rn(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p nv_bfloat16 input \p b from input \p a in round-to-nearest +* mode. Prevents floating-point contractions of mul+sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub_rn(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 multiplication in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat16 multiplication of inputs \p a and \p b, in round-to-nearest +* mode. Prevents floating-point contractions of mul+add or sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of multiplying \p a and \p b. +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul_rn(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 division in round-to-nearest-even mode. +* +* \details Divides \p nv_bfloat16 input \p a by input \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-98 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of dividing \p a by \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hdiv(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p nv_bfloat16 add of inputs \p a and \p b, in round-to-nearest-even mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd_sat(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 subtraction in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. 
+* +* \details Subtracts \p nv_bfloat16 input \p b from input \p a in round-to-nearest +* mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of subtraction of \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub_sat(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 multiplication in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p nv_bfloat16 multiplication of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the result to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of multiplying \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul_sat(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 fused multiply-add in round-to-nearest-even mode. +* +* \details Performs \p nv_bfloat16 multiply on inputs \p a and \p b, +* then performs a \p nv_bfloat16 add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-96 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* \param[in] c - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 fused multiply-add in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p nv_bfloat16 multiply on inputs \p a and \p b, +* then performs a \p nv_bfloat16 add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the result +* to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* \param[in] c - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma_sat(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Negates input \p nv_bfloat16 number and returns the result. +* +* \details Negates input \p nv_bfloat16 number and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-100 +* \endinternal +* \param[in] a - nv_bfloat16. Is only being read. 
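+*
+* Illustrative sketch (not from the original header); the helper name is
+* hypothetical.
+* \code
+* __device__ __nv_bfloat16 negative_magnitude(__nv_bfloat16 x)
+* {
+*     // -|x|: take the absolute value, then flip the sign.
+*     return __hneg(__habs(x));
+* }
+* \endcode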
+* +* \returns nv_bfloat16 +* - minus a +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hneg(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector if-equal comparison and returns boolean true +* iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 if-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of if-equal comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbeq2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector not-equal comparison and returns boolean +* true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of not-equal comparison +* of vectors \p a and \p b are true, +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbne2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector less-equal comparison and returns boolean +* true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of less-equal comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hble2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector greater-equal comparison and returns boolean +* true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. 
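+*
+* Illustrative sketch (not from the original header): using the reduced boolean
+* to skip work for a whole __nv_bfloat162 pair; names are hypothetical.
+* \code
+* __device__ __nv_bfloat162 clamp_low2(__nv_bfloat162 x, __nv_bfloat162 lo)
+* {
+*     // True only when BOTH halves of x are >= the corresponding halves of lo,
+*     // so the clamp can be skipped entirely for such pairs.
+*     if (__hbge2(x, lo)) {
+*         return x;
+*     }
+*     return __hmax2(x, lo);
+* }
+* \endcode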
+* +* \returns bool +* - true if both \p nv_bfloat16 results of greater-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbge2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector less-than comparison and returns boolean +* true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of less-than comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hblt2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector greater-than comparison and returns boolean +* true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of greater-than +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbgt2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered if-equal comparison and returns +* boolean true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 if-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of unordered if-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbequ2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered not-equal comparison and returns +* boolean true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 not-equal comparisons +* evaluate to true, or false otherwise. 
+* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of unordered not-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbneu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered less-equal comparison and returns +* boolean true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of unordered less-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbleu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered greater-equal comparison and +* returns boolean true iff both \p nv_bfloat16 results are true, boolean false +* otherwise. +* +* \details Performs \p nv_bfloat162 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of unordered +* greater-equal comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbgeu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered less-than comparison and returns +* boolean true iff both \p nv_bfloat16 results are true, boolean false otherwise. +* +* \details Performs \p nv_bfloat162 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of unordered less-than comparison of +* vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbltu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Performs \p nv_bfloat162 vector unordered greater-than comparison and +* returns boolean true iff both \p nv_bfloat16 results are true, boolean false +* otherwise. 
+* +* \details Performs \p nv_bfloat162 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p nv_bfloat16 greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns bool +* - true if both \p nv_bfloat16 results of unordered +* greater-than comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hbgtu2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 if-equal comparison. +* +* \details Performs \p nv_bfloat16 if-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of if-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __heq(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 not-equal comparison. +* +* \details Performs \p nv_bfloat16 not-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of not-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hne(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 less-equal comparison. +* +* \details Performs \p nv_bfloat16 less-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of less-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hle(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 greater-equal comparison. +* +* \details Performs \p nv_bfloat16 greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of greater-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hge(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 less-than comparison. +* +* \details Performs \p nv_bfloat16 less-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. 
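+*
+* Illustrative sketch (not from the original header): a scalar clamp built from
+* the ordered comparisons; the function name is hypothetical.
+* \code
+* __device__ __nv_bfloat16 clamp_scalar(__nv_bfloat16 x, __nv_bfloat16 lo, __nv_bfloat16 hi)
+* {
+*     // Ordered comparisons return false for NaN inputs, so a NaN x is
+*     // returned unchanged.
+*     if (__hlt(x, lo)) { return lo; }
+*     if (__hgt(x, hi)) { return hi; }
+*     return x;
+* }
+* \endcode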
+* +* \returns bool +* - The boolean result of less-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hlt(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 greater-than comparison. +* +* \details Performs \p nv_bfloat16 greater-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of greater-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hgt(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 unordered if-equal comparison. +* +* \details Performs \p nv_bfloat16 if-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of unordered if-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hequ(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 unordered not-equal comparison. +* +* \details Performs \p nv_bfloat16 not-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of unordered not-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hneu(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 unordered less-equal comparison. +* +* \details Performs \p nv_bfloat16 less-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of unordered less-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hleu(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 unordered greater-equal comparison. +* +* \details Performs \p nv_bfloat16 greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of unordered greater-equal comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hgeu(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 unordered less-than comparison. 
+* +* \details Performs \p nv_bfloat16 less-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of unordered less-than comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hltu(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Performs \p nv_bfloat16 unordered greater-than comparison. +* +* \details Performs \p nv_bfloat16 greater-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns bool +* - The boolean result of unordered greater-than comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hgtu(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Determine whether \p nv_bfloat16 argument is a NaN. +* +* \details Determine whether \p nv_bfloat16 value \p a is a NaN. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns bool +* - true iff argument is NaN. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ bool __hisnan(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Calculates \p nv_bfloat16 maximum of two input values. +* +* \details Calculates \p nv_bfloat16 max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmax(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Calculates \p nv_bfloat16 minimum of two input values. +* +* \details Calculates \p nv_bfloat16 min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmin(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Calculates \p nv_bfloat16 maximum of two input values, NaNs pass through. +* +* \details Calculates \p nv_bfloat16 max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. 
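+*
+* Illustrative sketch (not from the original header) contrasting the NaN
+* behavior of __hmax and __hmax_nan; names are hypothetical.
+* \code
+* __device__ void max_variants(__nv_bfloat16 a, __nv_bfloat16 b,
+*                              __nv_bfloat16 *m, __nv_bfloat16 *m_nan)
+* {
+*     // If exactly one input is NaN, __hmax returns the numeric input,
+*     // while __hmax_nan returns a canonical NaN instead.
+*     *m     = __hmax(a, b);
+*     *m_nan = __hmax_nan(a, b);
+* }
+* \endcode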
+* +* \returns nv_bfloat16 +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmax_nan(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_COMPARISON +* \brief Calculates \p nv_bfloat16 minimum of two input values, NaNs pass through. +* +* \details Calculates \p nv_bfloat16 min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmin_nan(const __nv_bfloat16 a, const __nv_bfloat16 b); +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Performs \p nv_bfloat16 fused multiply-add in round-to-nearest-even mode with relu saturation. +* +* \details Performs \p nv_bfloat16 multiply on inputs \p a and \p b, +* then performs a \p nv_bfloat16 add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - nv_bfloat16. Is only being read. +* \param[in] b - nv_bfloat16. Is only being read. +* \param[in] c - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma_relu(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Calculates \p nv_bfloat162 vector maximum of two inputs. +* +* \details Calculates \p nv_bfloat162 vector max(\p a, \p b). +* Elementwise \p nv_bfloat16 operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise maximum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmax2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Calculates \p nv_bfloat162 vector minimum of two inputs. +* +* \details Calculates \p nv_bfloat162 vector min(\p a, \p b). +* Elementwise \p nv_bfloat16 operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. 
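+*
+* Illustrative sketch (not from the original header): an elementwise clamp of
+* both halves built from the vector minimum and maximum; the name is hypothetical.
+* \code
+* __device__ __nv_bfloat162 clamp2(__nv_bfloat162 x, __nv_bfloat162 lo, __nv_bfloat162 hi)
+* {
+*     // Clamp each half of x into [lo, hi].
+*     return __hmin2(__hmax2(x, lo), hi);
+* }
+* \endcode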
+* +* \returns nv_bfloat162 +* - The result of elementwise minimum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmin2(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Calculates \p nv_bfloat162 vector maximum of two inputs, NaNs pass through. +* +* \details Calculates \p nv_bfloat162 vector max(\p a, \p b). +* Elementwise \p nv_bfloat16 operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise maximum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmax2_nan(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_COMPARISON +* \brief Calculates \p nv_bfloat162 vector minimum of two inputs, NaNs pass through. +* +* \details Calculates \p nv_bfloat162 vector min(\p a, \p b). +* Elementwise \p nv_bfloat16 operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise minimum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmin2_nan(const __nv_bfloat162 a, const __nv_bfloat162 b); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs \p nv_bfloat162 vector fused multiply-add in round-to-nearest-even +* mode with relu saturation. +* +* \details Performs \p nv_bfloat162 vector multiply on inputs \p a and \p b, +* then performs a \p nv_bfloat162 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* \param[in] c - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2_relu(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c); +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Performs fast complex multiply-accumulate +* +* \details Interprets vector \p nv_bfloat162 input pairs \p a, \p b, and \p c as +* complex numbers in \p nv_bfloat16 precision and performs +* complex multiply-accumulate operation: a*b + c +* \param[in] a - nv_bfloat162. Is only being read. +* \param[in] b - nv_bfloat162. Is only being read. +* \param[in] c - nv_bfloat162. Is only being read. 
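+*
+* Illustrative sketch (not from the original header): accumulating a small
+* complex dot product, assuming each __nv_bfloat162 stores one complex value
+* as a (real, imaginary) pair; names are hypothetical.
+* \code
+* __device__ __nv_bfloat162 complex_dot(const __nv_bfloat162 *x,
+*                                       const __nv_bfloat162 *y, int n)
+* {
+*     __nv_bfloat162 acc = __floats2bfloat162_rn(0.0f, 0.0f);
+*     for (int i = 0; i < n; ++i) {
+*         // acc += x[i] * y[i] in complex arithmetic.
+*         acc = __hcmadd(x[i], y[i], acc);
+*     }
+*     return acc;
+* }
+* \endcode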
+* +* \returns nv_bfloat162 +* - The result of complex multiply-accumulate operation on complex numbers \p a, \p b, and \p c +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hcmadd(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c); + +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 square root in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 square root of input \p a in round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hsqrt(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 reciprocal square root in round-to-nearest-even +* mode. +* +* \details Calculates \p nv_bfloat16 reciprocal square root of input \p a in round-to-nearest +* mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The reciprocal square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hrsqrt(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 reciprocal of input \p a in round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The reciprocal of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hrcp(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 natural logarithm in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 natural logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The natural logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hlog(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 binary logarithm in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 binary logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The binary logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hlog2(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 decimal logarithm in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 decimal logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The decimal logarithm of \p a. 
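+*
+* Illustrative sketch (not from the original header): a decibel conversion kept
+* entirely in bfloat16; the function name is hypothetical.
+* \code
+* __device__ __nv_bfloat16 to_decibels(__nv_bfloat16 power_ratio)
+* {
+*     // 10 * log10(power_ratio)
+*     return __hmul(__float2bfloat16(10.0f), hlog10(power_ratio));
+* }
+* \endcode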
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hlog10(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 natural exponential function in round-to-nearest +* mode. +* +* \details Calculates \p nv_bfloat16 natural exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The natural exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hexp(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 binary exponential function in round-to-nearest +* mode. +* +* \details Calculates \p nv_bfloat16 binary exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The binary exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hexp2(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 decimal exponential function in round-to-nearest +* mode. +* +* \details Calculates \p nv_bfloat16 decimal exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The decimal exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hexp10(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 cosine in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 cosine of input \p a in round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The cosine of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hcos(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT16_FUNCTIONS +* \brief Calculates \p nv_bfloat16 sine in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat16 sine of input \p a in round-to-nearest-even mode. +* \param[in] a - nv_bfloat16. Is only being read. +* +* \returns nv_bfloat16 +* - The sine of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 hsin(const __nv_bfloat16 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector square root in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat162 square root of input vector \p a in round-to-nearest +* mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2sqrt(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector reciprocal square root in round-to-nearest +* mode. 
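+*
+* A minimal usage sketch (the function name is illustrative only): scaling packed
+* values by the elementwise reciprocal square root of another pair, as in a
+* normalization step:
+* \code
+* __device__ __nv_bfloat162 scale_by_rsqrt(const __nv_bfloat162 v, const __nv_bfloat162 d)
+* {
+*     return __hmul2(v, h2rsqrt(d));   // elementwise v * 1/sqrt(d)
+* }
+* \endcode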
+* +* \details Calculates \p nv_bfloat162 reciprocal square root of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise reciprocal square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2rsqrt(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat162 reciprocal of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise reciprocal on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2rcp(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector natural logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p nv_bfloat162 natural logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise natural logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2log(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector binary logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p nv_bfloat162 binary logarithm of input vector \p a in round-to-nearest +* mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise binary logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2log2(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector decimal logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p nv_bfloat162 decimal logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise decimal logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2log10(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector exponential function in round-to-nearest +* mode. +* +* \details Calculates \p nv_bfloat162 exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector binary exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat162 binary exponential function of input vector \p a in +* round-to-nearest-even mode. 
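+*
+* A minimal usage sketch (the values are illustrative only):
+* \code
+* const __nv_bfloat162 v = __floats2bfloat162_rn(1.0f, 3.0f);
+* const __nv_bfloat162 p = h2exp2(v);   // approximately (2.0, 8.0)
+* \endcode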
+* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise binary exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp2(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector decimal exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat162 decimal exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise decimal exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp10(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector cosine in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat162 cosine of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise cosine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2cos(const __nv_bfloat162 a); +/** +* \ingroup CUDA_MATH__BFLOAT162_FUNCTIONS +* \brief Calculates \p nv_bfloat162 vector sine in round-to-nearest-even mode. +* +* \details Calculates \p nv_bfloat162 sine of input vector \p a in round-to-nearest-even mode. +* \param[in] a - nv_bfloat162. Is only being read. +* +* \returns nv_bfloat162 +* - The elementwise sine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 h2sin(const __nv_bfloat162 a); + +/** +* \ingroup CUDA_MATH__BFLOAT162_ARITHMETIC +* \brief Vector add \p val to the value stored at \p address in global or shared memory, and writes this +* value back to \p address. The atomicity of the add operation is guaranteed separately for each of the +* two nv_bfloat16 elements; the entire __nv_bfloat162 is not guaranteed to be atomic as a single 32-bit access. +* +* \details The location of \p address must be in global or shared memory. This operation has undefined +* behavior otherwise. This operation is only supported by devices of compute capability 8.x and higher. +* +* \param[in] address - __nv_bfloat162*. An address in global or shared memory. +* \param[in] val - __nv_bfloat162. The value to be added. +* +* \returns __nv_bfloat162 +* - The old value read from \p address. +* +* \note_ref_guide_atomic +*/ +__CUDA_BF16_DECL__ __nv_bfloat162 atomicAdd(__nv_bfloat162 *const address, const __nv_bfloat162 val); + +/** +* \ingroup CUDA_MATH__BFLOAT16_ARITHMETIC +* \brief Adds \p val to the value stored at \p address in global or shared memory, and writes this value +* back to \p address. This operation is performed in one atomic operation. +* +* \details The location of \p address must be in global or shared memory. This operation has undefined +* behavior otherwise. This operation is only supported by devices of compute capability 8.x and higher. +* +* \param[in] address - __nv_bfloat16*. An address in global or shared memory. +* \param[in] val - __nv_bfloat16. The value to be added. 
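+*
+* A minimal usage sketch (the kernel and variable names are illustrative only),
+* accumulating per-bin sums in global memory on a device of compute capability 8.x or higher:
+* \code
+* __global__ void accumulate(__nv_bfloat16 *sums, const __nv_bfloat16 *vals, const int *bins, const int n)
+* {
+*     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n) {
+*         atomicAdd(&sums[bins[i]], vals[i]);
+*     }
+* }
+* \endcode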
+* +* \returns __nv_bfloat16 +* - The old value read from \p address. +* +* \note_ref_guide_atomic +*/ +__CUDA_BF16_DECL__ __nv_bfloat16 atomicAdd(__nv_bfloat16 *const address, const __nv_bfloat16 val); + +#endif /* defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) || defined(_NVHPC_CUDA)) */ + +#undef __CUDA_BF16_DECL__ +#undef __CUDA_HOSTDEVICE_BF16_DECL__ + +#endif /* defined(__cplusplus) */ + +/* Note the .hpp file is included even for host-side compilation, to capture the "nv_bfloat16" & "nv_bfloat162" definitions */ +#include "cuda_bf16.hpp" +#undef ___CUDA_BF16_STRINGIFY_INNERMOST +#undef __CUDA_BF16_STRINGIFY + +#endif /* end of include guard: __CUDA_BF16_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ec55a7b364a8ba1a2220565d979dbee87dd23f09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp @@ -0,0 +1,2846 @@ +/* +* Copyright 1993-2022 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. 
+*
+* Any use of the Licensed Deliverables in individual and commercial
+* software must include, in the user documentation and internal
+* comments to the code, the above Disclaimer and U.S. Government End
+* Users Notice.
+*/
+
+#if !defined(__CUDA_BF16_HPP__)
+#define __CUDA_BF16_HPP__
+
+#if !defined(__CUDA_BF16_H__)
+#error "Do not include this file directly. Instead, include cuda_bf16.h."
+#endif
+
+#if !defined(_MSC_VER) && __cplusplus >= 201103L
+#   define __CPP_VERSION_AT_LEAST_11_BF16
+#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
+#   define __CPP_VERSION_AT_LEAST_11_BF16
+#endif
+
+/* C++11 header for std::move.
+ * In RTC mode, std::move is provided implicitly; don't include the header
+ */
+#if defined(__CPP_VERSION_AT_LEAST_11_BF16) && !defined(__CUDACC_RTC__)
+#include <utility>
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) && !defined(__CUDACC_RTC__) */
+
+/* C++ header for std::memcpy (used for type punning in host-side implementations).
+ * When compiling as a CUDA source file memcpy is provided implicitly.
+ * !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
+ */
+#if defined(__cplusplus) && !defined(__CUDACC__)
+#include <cstring>
+#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
+
+// implicitly provided by NVRTC
+#if !defined(__CUDACC_RTC__)
+#include <nv/target>
+#endif /* !defined(__CUDACC_RTC__) */
+
+#if !defined(IF_DEVICE_OR_CUDACC)
+#if defined(__CUDACC__)
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, c)
+#else
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, f)
+#endif
+#endif
+
+/* Set up function decorations */
+#if defined(__CUDACC__)
+#define __CUDA_BF16_DECL__ static __device__ __inline__
+#define __CUDA_HOSTDEVICE_BF16_DECL__ static __host__ __device__ __inline__
+#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
+#define __CUDA_HOSTDEVICE__ __host__ __device__
+#else /* !defined(__CUDACC__) */
+#if defined(__GNUC__)
+#define __CUDA_HOSTDEVICE_BF16_DECL__ static __attribute__ ((unused))
+#else
+#define __CUDA_HOSTDEVICE_BF16_DECL__ static
+#endif /* defined(__GNUC__) */
+#define __CUDA_HOSTDEVICE__
+#endif /* defined(__CUDACC__) */
+
+/* Set up structure-alignment attribute */
+#if defined(__CUDACC__)
+#define __CUDA_ALIGN__(align) __align__(align)
+#else
+/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */
+#if defined(__CPP_VERSION_AT_LEAST_11_BF16)
+#define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */
+#else /* defined(__CPP_VERSION_AT_LEAST_11_BF16)*/
+#if defined(__GNUC__)
+#define __CUDA_ALIGN__(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+#define __CUDA_ALIGN__(n) __declspec(align(n))
+#else
+#define __CUDA_ALIGN__(n)
+#endif /* defined(__GNUC__) */
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */
+#endif /* defined(__CUDACC__) */
+
+/* Macros to allow nv_bfloat16 & nv_bfloat162 to be used by inline assembly */
+#define __BFLOAT16_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
+#define __BFLOAT16_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
+#define __BFLOAT162_TO_UI(var) *(reinterpret_cast<unsigned int *>(&(var)))
+#define __BFLOAT162_TO_CUI(var) *(reinterpret_cast<const unsigned int *>(&(var)))
+
+/**
+* Types which allow static initialization of "nv_bfloat16" and "nv_bfloat162" until
+* these become an actual builtin. Note this initialization is as a
+* bitfield representation of "nv_bfloat16", and not a conversion from short->nv_bfloat16.
+* Such a representation will be deprecated in a future version of CUDA.
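+*
+* A minimal sketch of such a static initialization (the variable names are illustrative
+* only; 0x3F80 is the bit pattern of bfloat16 1.0f, the same constant the increment and
+* decrement operators below use):
+* \code
+* const __nv_bfloat16_raw one_raw = { 0x3F80 };
+* __nv_bfloat16 one;
+* one = one_raw;   // adopts the raw bits; no numeric conversion is performed
+* \endcode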
+* (Note these are visible to non-nvcc compilers, including C-only compilation) +*/ +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __nv_bfloat16_raw; + +typedef struct __CUDA_ALIGN__(4) { + unsigned short x; + unsigned short y; +} __nv_bfloat162_raw; + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/* Hide GCC member initialization list warnings because of host/device in-function init requirement */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic ignored "-Weffc++" +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +/* class' : multiple assignment operators specified + The class has multiple assignment operators of a single type. This warning is informational */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( push ) +#pragma warning( disable:4522 ) +#endif /* defined(__GNUC__) */ + +struct __CUDA_ALIGN__(2) __nv_bfloat16 { +protected: + unsigned short __x; + +public: +#if defined(__CPP_VERSION_AT_LEAST_11_BF16) + __nv_bfloat16() = default; +#else + __CUDA_HOSTDEVICE__ __nv_bfloat16() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */ + + /* Convert to/from __nv_bfloat16_raw */ + __CUDA_HOSTDEVICE__ __nv_bfloat16(const __nv_bfloat16_raw &hr) : __x(hr.x) { } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(const __nv_bfloat16_raw &hr) { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __nv_bfloat16 &operator=(const __nv_bfloat16_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __nv_bfloat16 &operator=(const volatile __nv_bfloat16_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ operator __nv_bfloat16_raw() const { __nv_bfloat16_raw ret; ret.x = __x; return ret; } + __CUDA_HOSTDEVICE__ operator __nv_bfloat16_raw() const volatile { __nv_bfloat16_raw ret; ret.x = __x; return ret; } + +#if !defined(__CUDA_NO_BFLOAT16_CONVERSIONS__) + /* Construct from float/double */ + __CUDA_HOSTDEVICE__ __nv_bfloat16(const float f) { __x = __float2bfloat16(f).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(const double f) { __x = __double2bfloat16(f).__x; } + + __CUDA_HOSTDEVICE__ operator float() const { return __bfloat162float(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(const float f) { __x = __float2bfloat16(f).__x; return *this; } + + /* We omit "cast to double" operator, so as to not be ambiguous about up-cast */ + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(const double f) { __x = __double2bfloat16(f).__x; return *this; } + +/* Member functions only available to nvcc compilation so far */ +#if (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) + /* Allow automatic construction from types supported natively in hardware */ + /* Note we do avoid constructor init-list because of special host/device compilation rules */ + __CUDA_HOSTDEVICE__ __nv_bfloat16(short val) { __x = __short2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(unsigned short val) { __x = __ushort2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(int val) { __x = __int2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(unsigned int val) { __x = __uint2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(long long val) { __x = __ll2bfloat16_rn(val).__x; } + __CUDA_HOSTDEVICE__ __nv_bfloat16(unsigned long long 
val) { __x = __ull2bfloat16_rn(val).__x; } + + /* Allow automatic casts to supported builtin types, matching all that are permitted with float */ + __CUDA_HOSTDEVICE__ operator short() const { return __bfloat162short_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(short val) { __x = __short2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned short() const { return __bfloat162ushort_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(unsigned short val) { __x = __ushort2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator int() const { return __bfloat162int_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(int val) { __x = __int2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned int() const { return __bfloat162uint_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(unsigned int val) { __x = __uint2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator long long() const { return __bfloat162ll_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(long long val) { __x = __ll2bfloat16_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned long long() const { return __bfloat162ull_rz(*this); } + __CUDA_HOSTDEVICE__ __nv_bfloat16 &operator=(unsigned long long val) { __x = __ull2bfloat16_rn(val).__x; return *this; } + + /* Boolean conversion - note both 0 and -0 must return false */ + __CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFF) != 0; } +#endif /* (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) */ +#endif /* !defined(__CUDA_NO_BFLOAT16_CONVERSIONS__) */ +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) || defined(_NVHPC_CUDA) + +#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) || defined(_NVHPC_CUDA) +#if !defined(__CUDA_NO_BFLOAT16_OPERATORS__) +/* Some basic arithmetic operations expected of a builtin */ +__device__ __forceinline__ __nv_bfloat16 operator+(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hadd(lh, rh); } +__device__ __forceinline__ __nv_bfloat16 operator-(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hsub(lh, rh); } +__device__ __forceinline__ __nv_bfloat16 operator*(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hmul(lh, rh); } +__device__ __forceinline__ __nv_bfloat16 operator/(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hdiv(lh, rh); } + +__device__ __forceinline__ __nv_bfloat16 &operator+=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hadd(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat16 &operator-=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hsub(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat16 &operator*=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hmul(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat16 &operator/=(__nv_bfloat16 &lh, const __nv_bfloat16 &rh) { lh = __hdiv(lh, rh); return lh; } + +/* Note for increment and decrement we use the raw value 0x3F80 equating to nv_bfloat16(1.0f), to avoid the extra conversion */ +__device__ __forceinline__ __nv_bfloat16 &operator++(__nv_bfloat16 &h) { __nv_bfloat16_raw one; one.x = 0x3F80; h += one; return h; } +__device__ __forceinline__ __nv_bfloat16 &operator--(__nv_bfloat16 &h) { __nv_bfloat16_raw one; one.x = 0x3F80; h -= one; return h; } +__device__ __forceinline__ __nv_bfloat16 operator++(__nv_bfloat16 &h, const 
int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __nv_bfloat16 ret = h; + __nv_bfloat16_raw one; + one.x = 0x3F80; + h += one; + return ret; +} +__device__ __forceinline__ __nv_bfloat16 operator--(__nv_bfloat16 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __nv_bfloat16 ret = h; + __nv_bfloat16_raw one; + one.x = 0x3F80; + h -= one; + return ret; +} +/* Unary plus and inverse operators */ +__device__ __forceinline__ __nv_bfloat16 operator+(const __nv_bfloat16 &h) { return h; } +__device__ __forceinline__ __nv_bfloat16 operator-(const __nv_bfloat16 &h) { return __hneg(h); } + +/* Some basic comparison operations to make it look like a builtin */ +__device__ __forceinline__ bool operator==(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __heq(lh, rh); } +__device__ __forceinline__ bool operator!=(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hneu(lh, rh); } +__device__ __forceinline__ bool operator> (const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hgt(lh, rh); } +__device__ __forceinline__ bool operator< (const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hlt(lh, rh); } +__device__ __forceinline__ bool operator>=(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hge(lh, rh); } +__device__ __forceinline__ bool operator<=(const __nv_bfloat16 &lh, const __nv_bfloat16 &rh) { return __hle(lh, rh); } +#endif /* !defined(__CUDA_NO_BFLOAT16_OPERATORS__) */ +#endif /* (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) || defined(_NVHPC_CUDA) */ +#endif /* defined(__CUDACC__) */ + +/* __nv_bfloat162 is visible to non-nvcc host compilers */ +struct __CUDA_ALIGN__(4) __nv_bfloat162 { + __nv_bfloat16 x; + __nv_bfloat16 y; + + // All construct/copy/assign/move +public: +#if defined(__CPP_VERSION_AT_LEAST_11_BF16) + __nv_bfloat162() = default; + __CUDA_HOSTDEVICE__ __nv_bfloat162(__nv_bfloat162 &&src) { __BFLOAT162_TO_UI(*this) = std::move(__BFLOAT162_TO_CUI(src)); } + __CUDA_HOSTDEVICE__ __nv_bfloat162 &operator=(__nv_bfloat162 &&src) { __BFLOAT162_TO_UI(*this) = std::move(__BFLOAT162_TO_CUI(src)); return *this; } +#else + __CUDA_HOSTDEVICE__ __nv_bfloat162() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */ + __CUDA_HOSTDEVICE__ __nv_bfloat162(const __nv_bfloat16 &a, const __nv_bfloat16 &b) : x(a), y(b) { } + __CUDA_HOSTDEVICE__ __nv_bfloat162(const __nv_bfloat162 &src) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(src); } + __CUDA_HOSTDEVICE__ __nv_bfloat162 &operator=(const __nv_bfloat162 &src) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(src); return *this; } + + /* Convert to/from __nv_bfloat162_raw */ + __CUDA_HOSTDEVICE__ __nv_bfloat162(const __nv_bfloat162_raw &h2r ) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(h2r); } + __CUDA_HOSTDEVICE__ __nv_bfloat162 &operator=(const __nv_bfloat162_raw &h2r) { __BFLOAT162_TO_UI(*this) = __BFLOAT162_TO_CUI(h2r); return *this; } + __CUDA_HOSTDEVICE__ operator __nv_bfloat162_raw() const { __nv_bfloat162_raw ret; ret.x = 0U; ret.y = 0U; __BFLOAT162_TO_UI(ret) = __BFLOAT162_TO_CUI(*this); return ret; } +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +#if ((!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) && !defined(__CUDA_NO_BFLOAT162_OPERATORS__)) || defined(_NVHPC_CUDA) + 
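+/*
+ * A minimal usage sketch (not part of the original header; the function name is
+ * illustrative only): with these operators enabled, packed bfloat16 arithmetic reads
+ * like built-in float math, e.g.
+ *
+ *     __device__ __nv_bfloat162 fma2_bf16(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c)
+ *     {
+ *         return a * b + c;   // lowers to __hmul2 and __hadd2 defined below
+ *     }
+ */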
+__device__ __forceinline__ __nv_bfloat162 operator+(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hadd2(lh, rh); } +__device__ __forceinline__ __nv_bfloat162 operator-(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hsub2(lh, rh); } +__device__ __forceinline__ __nv_bfloat162 operator*(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hmul2(lh, rh); } +__device__ __forceinline__ __nv_bfloat162 operator/(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __h2div(lh, rh); } + +__device__ __forceinline__ __nv_bfloat162& operator+=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __hadd2(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat162& operator-=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __hsub2(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat162& operator*=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __hmul2(lh, rh); return lh; } +__device__ __forceinline__ __nv_bfloat162& operator/=(__nv_bfloat162 &lh, const __nv_bfloat162 &rh) { lh = __h2div(lh, rh); return lh; } + +__device__ __forceinline__ __nv_bfloat162 &operator++(__nv_bfloat162 &h) { __nv_bfloat162_raw one; one.x = 0x3F80; one.y = 0x3F80; h = __hadd2(h, one); return h; } +__device__ __forceinline__ __nv_bfloat162 &operator--(__nv_bfloat162 &h) { __nv_bfloat162_raw one; one.x = 0x3F80; one.y = 0x3F80; h = __hsub2(h, one); return h; } +__device__ __forceinline__ __nv_bfloat162 operator++(__nv_bfloat162 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __nv_bfloat162 ret = h; + __nv_bfloat162_raw one; + one.x = 0x3F80; + one.y = 0x3F80; + h = __hadd2(h, one); + return ret; +} +__device__ __forceinline__ __nv_bfloat162 operator--(__nv_bfloat162 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. 
+    static_cast<void>(ignored);
+
+    const __nv_bfloat162 ret = h;
+    __nv_bfloat162_raw one;
+    one.x = 0x3F80;
+    one.y = 0x3F80;
+    h = __hsub2(h, one);
+    return ret;
+}
+__device__ __forceinline__ __nv_bfloat162 operator+(const __nv_bfloat162 &h) { return h; }
+__device__ __forceinline__ __nv_bfloat162 operator-(const __nv_bfloat162 &h) { return __hneg2(h); }
+
+__device__ __forceinline__ bool operator==(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbeq2(lh, rh); }
+__device__ __forceinline__ bool operator!=(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbneu2(lh, rh); }
+__device__ __forceinline__ bool operator>(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbgt2(lh, rh); }
+__device__ __forceinline__ bool operator<(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hblt2(lh, rh); }
+__device__ __forceinline__ bool operator>=(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hbge2(lh, rh); }
+__device__ __forceinline__ bool operator<=(const __nv_bfloat162 &lh, const __nv_bfloat162 &rh) { return __hble2(lh, rh); }
+
+#endif /* ((!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)) && !defined(__CUDA_NO_BFLOAT162_OPERATORS__)) || defined(_NVHPC_CUDA) */
+#endif /* defined(__CUDACC__) */
+
+/* Restore warning for multiple assignment operators */
+#if defined(_MSC_VER) && _MSC_VER >= 1500
+#pragma warning( pop )
+#endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */
+
+/* Restore -Weffc++ warnings from here on */
+#if defined(__GNUC__)
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
+#endif /* defined(__GNUC__) */
+
+#undef __CUDA_HOSTDEVICE__
+#undef __CUDA_ALIGN__
+
+__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __internal_float_as_uint(const float f)
+{
+    unsigned int u;
+IF_DEVICE_OR_CUDACC(
+    u = __float_as_uint(f);
+,
+    memcpy(&u, &f, sizeof(f));
+,
+    std::memcpy(&u, &f, sizeof(f));
+)
+    return u;
+}
+
+__CUDA_HOSTDEVICE_BF16_DECL__ float __internal_uint_as_float(unsigned int u)
+{
+    float f;
+IF_DEVICE_OR_CUDACC(
+    f = __uint_as_float(u);
+,
+    memcpy(&f, &u, sizeof(u));
+,
+    std::memcpy(&f, &u, sizeof(u));
+)
+    return f;
+}
+
+__CUDA_HOSTDEVICE_BF16_DECL__ unsigned short __internal_float2bfloat16(const float f, unsigned int &sign, unsigned int &remainder)
+{
+    unsigned int x;
+
+    x = __internal_float_as_uint(f);
+
+    if ((x & 0x7fffffffU) > 0x7f800000U) {
+        sign = 0U;
+        remainder = 0U;
+        return static_cast<unsigned short>(0x7fffU);
+    }
+    sign = x >> 31U;
+    remainder = x << 16U;
+    return static_cast<unsigned short>(x >> 16U);
+}
+
+__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __double2bfloat16(const double x)
+{
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,
+    __nv_bfloat16 val;
+    asm("{ cvt.rn.bf16.f64 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "d"(x));
+    return val;
+,
+    float f = static_cast<float>(x);
+    const double d = static_cast<double>(f);
+    unsigned int u = __internal_float_as_uint(f);
+
+    bool x_is_not_nan = ((u << (unsigned)1U) <= (unsigned)0xFF000000U);
+
+    if ((x > 0.0) && (d > x)) {
+        u--;
+    }
+    if ((x < 0.0) && (d < x)) {
+        u--;
+    }
+    if ((d != x) && x_is_not_nan) {
+        u |= 1U;
+    }
+
+    f = __internal_uint_as_float(u);
+
+    return __float2bfloat16(f);
+)
+}
+
+__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16(const float a)
+{
+    __nv_bfloat16 val;
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
+    asm("{ cvt.rn.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a));
+,
+    __nv_bfloat16_raw r;
+    unsigned int sign = 0U;
+    unsigned int remainder = 0U;
+    r.x =
__internal_float2bfloat16(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rn(const float a) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); +, + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rz(const float a) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rz.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); +, + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_rd(const float a) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("{ cvt.rm.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); + return val; +, + __nv_bfloat16 val; + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + if ((remainder != 0U) && (sign != 0U)) { + r.x++; + } + val = r; + return val; +) +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __float2bfloat16_ru(const float a) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("{ cvt.rp.bf16.f32 %0, %1;}\n" : "=h"(__BFLOAT16_TO_US(val)) : "f"(a)); + return val; +, + __nv_bfloat16 val; + __nv_bfloat16_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2bfloat16(a, sign, remainder); + if ((remainder != 0U) && (sign == 0U)) { + r.x++; + } + val = r; + return val; +) +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __float2bfloat162_rn(const float a) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{.reg .b16 low;\n" + " cvt.rn.bf16.f32 low, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "f"(a)); +, + val = __nv_bfloat162(__float2bfloat16_rn(a), __float2bfloat16_rn(a)); +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __floats2bfloat162_rn(const float a, const float b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.bf16x2.f32 %0, %2, %1;}\n" + : "=r"(__BFLOAT162_TO_UI(val)) : "f"(a), "f"(b)); +, + val = __nv_bfloat162(__float2bfloat16_rn(a), __float2bfloat16_rn(b)); +) + return val; +} + +#if (defined __CUDACC__) +__CUDA_BF16_DECL__ float __internal_device_bfloat162float(const unsigned short h) +{ + float f; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("{ cvt.f32.bf16 %0, %1;}\n" : "=f"(f) : "h"(h)); +, + asm("{ mov.b32 %0, {0,%1};}\n" : "=f"(f) : "h"(h)); +) + return f; +} +#endif /* (defined __CUDACC__) */ + +__CUDA_HOSTDEVICE_BF16_DECL__ float __internal_bfloat162float(const unsigned short h) +{ + float f; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + f = __internal_device_bfloat162float(h); +, + unsigned int u = static_cast(h) << 16; + f = __internal_uint_as_float(u); +) + return f; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ float __bfloat162float(const __nv_bfloat16 a) +{ + return __internal_bfloat162float(static_cast<__nv_bfloat16_raw>(a).x); +} +__CUDA_HOSTDEVICE_BF16_DECL__ float 
__low2float(const __nv_bfloat162 a) +{ + return __internal_bfloat162float(static_cast<__nv_bfloat162_raw>(a).x); +} + +__CUDA_HOSTDEVICE_BF16_DECL__ float __high2float(const __nv_bfloat162 a) +{ + return __internal_bfloat162float(static_cast<__nv_bfloat162_raw>(a).y); +} + +#if (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) + +/* CUDA vector-types compatible vector creation function (note returns __nv_bfloat162, not nv_bfloat162) */ +__VECTOR_FUNCTIONS_DECL__ __nv_bfloat162 make_bfloat162(const __nv_bfloat16 x, const __nv_bfloat16 y) +{ + __nv_bfloat162 t; t.x = x; t.y = y; return t; +} +#undef __VECTOR_FUNCTIONS_DECL__ + + +/* Definitions of intrinsics */ +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat162 __float22bfloat162_rn(const float2 a) +{ + __nv_bfloat162 val = __floats2bfloat162_rn(a.x, a.y); + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ float2 __bfloat1622float2(const __nv_bfloat162 a) +{ + float hi_float; + float lo_float; + lo_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).x); + hi_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).y); + return make_float2(lo_float, hi_float); +} +__CUDA_BF16_DECL__ int __bfloat162int_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rni.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2int_rn(__bfloat162float(h)); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ int __internal_bfloat162int_rz(const __nv_bfloat16 h) +{ + const float f = __bfloat162float(h); + int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + i = __float2int_rz(f); +, + const int max_val = (int)0x7fffffffU; + const int min_val = (int)0x80000000U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = 0; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ int __bfloat162int_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rzi.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __internal_bfloat162int_rz(h); +) +} + +__CUDA_BF16_DECL__ int __bfloat162int_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rmi.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2int_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ int __bfloat162int_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + int val; + asm("{ cvt.rpi.s32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2int_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_int2bfloat16_rn(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + const float ru = __int2float_ru(i); + const float rd = __int2float_rd(i); + float rz = __int2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rn(const int i) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_int2bfloat16_rn(i); +, + const double d = 
static_cast(i); + return __double2bfloat16(d); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rz(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_rz(__int2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_rd(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_rd(__int2float_rd(i)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __int2bfloat16_ru(const int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.s32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_ru(__int2float_ru(i)); +) +} + +__CUDA_BF16_DECL__ short int __bfloat162short_rn(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rni.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rni.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_BF16_DECL__ short int __internal_device_bfloat162short_rz(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rzi.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ short int __bfloat162short_rz(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + val = __internal_device_bfloat162short_rz(h); +, + const float f = __bfloat162float(h); + const short int max_val = (short int)0x7fffU; + const short int min_val = (short int)0x8000U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + val = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + val = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + val = min_val; + } else { + val = static_cast(f); + } +) + return val; +} + +__CUDA_BF16_DECL__ short int __bfloat162short_rd(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rmi.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rmi.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_BF16_DECL__ short int __bfloat162short_ru(const __nv_bfloat16 h) +{ + short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rpi.s16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rpi.s16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rn(const short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + const float f = static_cast(i); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rz(const short int i) +{ 
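+    /* Round-toward-zero conversion of a signed short to bfloat16: on sm_90 and newer this
+       maps to a single cvt.rz.bf16.s16 instruction; on older targets the value is first
+       converted to float (exact, since every short fits in a float) and then truncated
+       to bfloat16. */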
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_rz(__int2float_rz(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_rd(const short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_rd(__int2float_rd(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short2bfloat16_ru(const short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.s16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_ru(__int2float_ru(static_cast(i))); +) +} + +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rni.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2uint_rn(__bfloat162float(h)); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __internal_bfloat162uint_rz(const __nv_bfloat16 h) +{ + const float f = __bfloat162float(h); + unsigned int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + i = __float2uint_rz(f); +, + const unsigned int max_val = 0xffffffffU; + const unsigned int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = 0U; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned int __bfloat162uint_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rzi.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __internal_bfloat162uint_rz(h); +) +} +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rmi.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2uint_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ unsigned int __bfloat162uint_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned int val; + asm("{ cvt.rpi.u32.bf16 %0, %1;}\n" : "=r"(val) : "h"(__BFLOAT16_TO_CUS(h))); + return val; +, + return __float2uint_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_uint2bfloat16_rn(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + const float ru = __uint2float_ru(i); + const float rd = __uint2float_rd(i); + float rz = __uint2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rn(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_uint2bfloat16_rn(i); +, + const double d = static_cast(i); + return __double2bfloat16(d); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rz(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : 
"r"(i)); + return val; +, + return __float2bfloat16_rz(__uint2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_rd(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_rd(__uint2float_rd(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __uint2bfloat16_ru(const unsigned int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.u32 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "r"(i)); + return val; +, + return __float2bfloat16_ru(__uint2float_ru(i)); +) +} + +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_rn(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rni.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rni.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_BF16_DECL__ unsigned short int __internal_device_bfloat162ushort_rz(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rzi.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned short int __bfloat162ushort_rz(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + val = __internal_device_bfloat162ushort_rz(h); +, + const float f = __bfloat162float(h); + const unsigned short int max_val = 0xffffU; + const unsigned short int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + val = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + val = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + val = min_val; + } else { + val = static_cast(f); + } +) + return val; +} +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_rd(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rmi.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rmi.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_BF16_DECL__ unsigned short int __bfloat162ushort_ru(const __nv_bfloat16 h) +{ + unsigned short int val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rpi.u16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +, + asm("{ .reg.f32 f;\n" + " mov.b32 f, {0,%1};\n" + " cvt.rpi.u16.f32 %0,f;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(h))); +) + return val; +} +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rn(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rn.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + const float f = static_cast(i); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rz(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rz.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : 
"h"(i)); + return val; +, + return __float2bfloat16_rz(__uint2float_rz(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_rd(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rm.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_rd(__uint2float_rd(static_cast(i))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort2bfloat16_ru(const unsigned short int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 val; + asm("cvt.rp.bf16.u16 %0, %1;" : "=h"(__BFLOAT16_TO_US(val)) : "h"(i)); + return val; +, + return __float2bfloat16_ru(__uint2float_ru(static_cast(i))); +) +} + +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned long long int i; + asm("cvt.rni.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ull_rn(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ unsigned long long int __internal_device_bfloat162ull_rz(const __nv_bfloat16 h) +{ + unsigned long long int i; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); +, + const float f = __bfloat162float(h); + i = __float2ull_rz(f); +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ unsigned long long int __bfloat162ull_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_bfloat162ull_rz(h); +, + const float f = __bfloat162float(h); + unsigned long long int i; + const unsigned long long int max_val = 0xffffffffffffffffULL; + const unsigned long long int min_val = 0ULL; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = 0x8000000000000000ULL; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } + return i; +) +} + +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned long long int i; + asm("cvt.rmi.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ull_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ unsigned long long int __bfloat162ull_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + unsigned long long int i; + asm("cvt.rpi.u64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ull_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_ull2bfloat16_rn(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rn.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + const float ru = __ull2float_ru(i); + const float rd = __ull2float_rd(i); + float rz = __ull2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rn(const unsigned long long int i) +{ + +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_ull2bfloat16_rn(i); +, + float f = static_cast(i); + const unsigned long long int uf = static_cast(f); + unsigned int u = __internal_float_as_uint(f); + // round up happened here + // note: no need to handle round up to f == 0x1.p64 specially + if 
(uf > i) { + u--; + } + if (uf != i) { + u |= 1U; + } + f = __internal_uint_as_float(u); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rz(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rz.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rz(__ull2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_rd(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rm.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rd(__ull2float_rd(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ull2bfloat16_ru(const unsigned long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rp.bf16.u64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_ru(__ull2float_ru(i)); +) +} +__CUDA_BF16_DECL__ long long int __bfloat162ll_rn(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + long long int i; + asm("cvt.rni.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ll_rn(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ long long int __internal_device_bfloat162ll_rz(const __nv_bfloat16 h) +{ + long long int i; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm("cvt.rzi.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); +, + const float f = __bfloat162float(h); + i = __float2ll_rz(f); +) + return i; +} + +__CUDA_HOSTDEVICE_BF16_DECL__ long long int __bfloat162ll_rz(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_bfloat162ll_rz(h); +, + long long int i; + const float f = __bfloat162float(h); + const long long int max_val = (long long int)0x7fffffffffffffffULL; + const long long int min_val = (long long int)0x8000000000000000ULL; + const unsigned short bits = static_cast(static_cast<__nv_bfloat16_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xFF00U) { + // NaN + i = min_val; + } else if (f >= static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + i = static_cast(f); + } + return i; +) +} +__CUDA_BF16_DECL__ long long int __bfloat162ll_rd(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + long long int i; + asm("cvt.rmi.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ll_rd(__bfloat162float(h)); +) +} +__CUDA_BF16_DECL__ long long int __bfloat162ll_ru(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + long long int i; + asm("cvt.rpi.s64.bf16 %0, %1;" : "=l"(i) : "h"(__BFLOAT16_TO_CUS(h))); + return i; +, + return __float2ll_ru(__bfloat162float(h)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 __internal_device_ll2bfloat16_rn(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rn.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + const float ru = __ll2float_ru(i); + const float rd = __ll2float_rd(i); + float rz = __ll2float_rz(i); + if (ru != rd) { + rz = __uint_as_float(__float_as_uint(rz) | 1U); + } + return __float2bfloat16_rn(rz); +) +} + +__CUDA_HOSTDEVICE_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rn(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __internal_device_ll2bfloat16_rn(i); +, + float f = static_cast(i); + const long long int lf = 
static_cast(f); + unsigned int u = __internal_float_as_uint(f); + + if ((f > 0.0f) && (lf > i)) { + u--; + } + if ((f < 0.0f) && (lf < i)) { + u--; + } + if (lf != i) { + u |= 1U; + } + + f = __internal_uint_as_float(u); + return __float2bfloat16_rn(f); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rz(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rz.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rz(__ll2float_rz(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_rd(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rm.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_rd(__ll2float_rd(i)); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ll2bfloat16_ru(const long long int i) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 h; + asm("cvt.rp.bf16.s64 %0, %1;" : "=h"(__BFLOAT16_TO_US(h)) : "l"(i)); + return h; +, + return __float2bfloat16_ru(__ll2float_ru(i)); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 htrunc(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rzi.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_rz(truncf(__bfloat162float(h))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hceil(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rpi.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_ru(ceilf(__bfloat162float(h))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hfloor(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rmi.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_rd(floorf(__bfloat162float(h))); +) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hrint(const __nv_bfloat16 h) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("cvt.rni.bf16.bf16 %0, %1;" : "=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(h))); + return r; +, + return __float2bfloat16_rn(rintf(__bfloat162float(h))); +) +} + +__CUDA_BF16_DECL__ __nv_bfloat162 h2trunc(const __nv_bfloat162 h) +{ + const __nv_bfloat16 low = htrunc(h.x); + const __nv_bfloat16 high = htrunc(h.y); + return __nv_bfloat162(low, high); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2ceil(const __nv_bfloat162 h) +{ + const __nv_bfloat16 low = hceil(h.x); + const __nv_bfloat16 high = hceil(h.y); + return __nv_bfloat162(low, high); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2floor(const __nv_bfloat162 h) +{ + const __nv_bfloat16 low = hfloor(h.x); + const __nv_bfloat16 high = hfloor(h.y); + return __nv_bfloat162(low, high); +} + +__CUDA_BF16_DECL__ __nv_bfloat162 h2rint(const __nv_bfloat162 h) +{ + return __halves2bfloat162(hrint(__low2bfloat16(h)), hrint(__high2bfloat16(h))); +} +__CUDA_BF16_DECL__ __nv_bfloat162 __lows2bfloat162(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm("{.reg .b16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {alow,blow};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)), "r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __highs2bfloat162(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm("{.reg .b16 alow,ahigh,blow,bhigh;\n" + " mov.b32 
{alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)), "r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __low2bfloat16(const __nv_bfloat162 a) +{ + __nv_bfloat16 ret; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, low;}" : "=h"(__BFLOAT16_TO_US(ret)) : "r"(__BFLOAT162_TO_CUI(a))); + return ret; +} +__CUDA_BF16_DECL__ int __hisinf(const __nv_bfloat16 a) +{ + int retval; + if (__BFLOAT16_TO_CUS(a) == 0xFF80U) { + retval = -1; + } else if (__BFLOAT16_TO_CUS(a) == 0x7F80U) { + retval = 1; + } else { + retval = 0; + } + return retval; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __low2bfloat162(const __nv_bfloat162 a) +{ + __nv_bfloat162 val; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __high2bfloat162(const __nv_bfloat162 a) +{ + __nv_bfloat162 val; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,high};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __high2bfloat16(const __nv_bfloat162 a) +{ + __nv_bfloat16 ret; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, high;}" : "=h"(__BFLOAT16_TO_US(ret)) : "r"(__BFLOAT162_TO_CUI(a))); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __halves2bfloat162(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat162 val; + asm("{ mov.b32 %0, {%1,%2};}\n" + : "=r"(__BFLOAT162_TO_UI(val)) : "h"(__BFLOAT16_TO_CUS(a)), "h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __bfloat162bfloat162(const __nv_bfloat16 a) +{ + __nv_bfloat162 val; + asm("{ mov.b32 %0, {%1,%1};}\n" + : "=r"(__BFLOAT162_TO_UI(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __lowhigh2highlow(const __nv_bfloat162 a) +{ + __nv_bfloat162 val; + asm("{.reg .b16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,low};}\n" : "=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ short int __bfloat16_as_short(const __nv_bfloat16 h) +{ + return static_cast(__BFLOAT16_TO_CUS(h)); +} +__CUDA_BF16_DECL__ unsigned short int __bfloat16_as_ushort(const __nv_bfloat16 h) +{ + return __BFLOAT16_TO_CUS(h); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __short_as_bfloat16(const short int i) +{ + __nv_bfloat16 h; + __BFLOAT16_TO_US(h) = static_cast(i); + return h; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ushort_as_bfloat16(const unsigned short int i) +{ + __nv_bfloat16 h; + __BFLOAT16_TO_US(h) = i; + return h; +} + +/****************************************************************************** +* __nv_bfloat16, __nv_bfloat162 warp shuffle * +******************************************************************************/ +#define __SHUFFLE_SYNC_BFLOAT162_MACRO(name) /* do */ {\ + __nv_bfloat162 r; \ + asm volatile ("{" __CUDA_BF16_STRINGIFY(name) " %0,%1,%2,%3,%4;\n}" \ + :"=r"(__BFLOAT162_TO_UI(r)): "r"(__BFLOAT162_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \ + return r; \ +} /* while(0) */ + +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_sync(const unsigned mask, const __nv_bfloat162 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - 
static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.idx.b32) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_up_sync(const unsigned mask, const __nv_bfloat162 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.up.b32) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_down_sync(const unsigned mask, const __nv_bfloat162 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.down.b32) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __shfl_xor_sync(const unsigned mask, const __nv_bfloat162 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_BFLOAT162_MACRO(shfl.sync.bfly.b32) +} + +#undef __SHUFFLE_SYNC_BFLOAT162_MACRO + +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_sync(const unsigned mask, const __nv_bfloat16 var, const int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_up_sync(const unsigned mask, const __nv_bfloat16 var, const unsigned int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_up_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_down_sync(const unsigned mask, const __nv_bfloat16 var, const unsigned int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_down_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __shfl_xor_sync(const unsigned mask, const __nv_bfloat16 var, const int delta, const int width) +{ + const __nv_bfloat162 temp1 = __halves2bfloat162(var, var); + const __nv_bfloat162 temp2 = __shfl_xor_sync(mask, temp1, delta, width); + return __low2bfloat16(temp2); +} + +/****************************************************************************** +* __nv_bfloat16 and __nv_bfloat162 __ldg,__ldcg,__ldca,__ldcs * +******************************************************************************/ + +#if defined(__cplusplus) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ +__CUDA_BF16_DECL__ __nv_bfloat162 __ldg(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldg(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcg(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcg(const 
__nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldca(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldca(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcs(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcs(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldlu(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldlu(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __ldcv(const __nv_bfloat162 *const ptr) +{ + __nv_bfloat162 ret; + asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__BFLOAT162_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __ldcv(const __nv_bfloat16 *const ptr) +{ + __nv_bfloat16 ret; + asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__BFLOAT16_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} + +__CUDA_BF16_DECL__ void __stwb(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stwb(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcg(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcg(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcs(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stcs(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stwt(__nv_bfloat162 *const ptr, const __nv_bfloat162 value) +{ + asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__BFLOAT162_TO_CUI(value)) : "memory"); +} +__CUDA_BF16_DECL__ void __stwt(__nv_bfloat16 *const ptr, const __nv_bfloat16 value) +{ + asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__BFLOAT16_TO_CUS(value)) : "memory"); +} + +#undef __LDG_PTR +#endif /*defined(__cplusplus) */ +/****************************************************************************** +* __nv_bfloat162 comparison * 
+******************************************************************************/ +#define __COMPARISON_OP_BFLOAT162_MACRO(name) {\ + __nv_bfloat162 val; \ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + asm( "{ " __CUDA_BF16_STRINGIFY(name) ".bf16x2.bf16x2 %0,%1,%2;\n}" \ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +,\ + asm( "{.reg .b32 low_a,low_b,high_a,high_b,high_res,low_res;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 high_res, high_a, high_b;\n"\ + " shr.u32 low_res, low_res, 16;\n"\ + " or.b32 %0, high_res, low_res;}\n"\ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +)\ + return val; \ +} + +__CUDA_BF16_DECL__ __nv_bfloat162 __heq2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.eq) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hne2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.ne) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hle2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.le) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hge2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.ge) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hlt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.lt) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hgt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.gt) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hequ2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.equ) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hneu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.neu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hleu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.leu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hgeu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.geu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hltu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.ltu) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hgtu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO(set.gtu) +} +#undef __COMPARISON_OP_BFLOAT162_MACRO +/****************************************************************************** +* __nv_bfloat162 comparison with mask output * +******************************************************************************/ +#define __COMPARISON_OP_BFLOAT162_MACRO_MASK(name) {\ + unsigned val; \ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + asm( "{ " __CUDA_BF16_STRINGIFY(name) ".u32.bf16x2 %0,%1,%2;\n}" \ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +,\ + asm( "{.reg .b32 low_a,low_b,high_a,high_b;\n"\ + " .reg .u32 high_res,low_res;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".u32.f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".u32.f32 high_res, high_a, high_b;\n"\ + " shr.u32 low_res, low_res, 16;\n"\ + " shl.b32 high_res, high_res, 
16;\n"\ + " or.b32 %0, high_res, low_res;}\n"\ + :"=r"(val) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ +)\ + return val; \ +} + +__CUDA_BF16_DECL__ unsigned __heq2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.eq) +} +__CUDA_BF16_DECL__ unsigned __hne2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.ne) +} +__CUDA_BF16_DECL__ unsigned __hle2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.le) +} +__CUDA_BF16_DECL__ unsigned __hge2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.ge) +} +__CUDA_BF16_DECL__ unsigned __hlt2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.lt) +} +__CUDA_BF16_DECL__ unsigned __hgt2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.gt) +} +__CUDA_BF16_DECL__ unsigned __hequ2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.equ) +} +__CUDA_BF16_DECL__ unsigned __hneu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.neu) +} +__CUDA_BF16_DECL__ unsigned __hleu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.leu) +} +__CUDA_BF16_DECL__ unsigned __hgeu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.geu) +} +__CUDA_BF16_DECL__ unsigned __hltu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.ltu) +} +__CUDA_BF16_DECL__ unsigned __hgtu2_mask(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __COMPARISON_OP_BFLOAT162_MACRO_MASK(set.gtu) +} +#undef __COMPARISON_OP_BFLOAT162_MACRO_MASK + +#define __BOOL_COMPARISON_OP_BFLOAT162_MACRO(name) {\ + unsigned int val; \ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + bool retval; \ + asm( "{ " __CUDA_BF16_STRINGIFY(name) ".bf16x2.bf16x2 %0,%1,%2;\n}" \ + :"=r"(val) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ + if (val == 0x3F803F80U) {\ + retval = true; \ + } else { \ + retval = false; \ + }\ + return retval;\ +,\ + asm( "{.reg .b32 low_a,low_b,high_a,high_b,high_res,low_res;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32.f32 high_res, high_a, high_b;\n"\ + " and.b32 %0, high_res, low_res;}\n"\ + :"=r"(val) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ + return (val != 0U) ? 
true : false; \ +)\ +} + +__CUDA_BF16_DECL__ bool __hbeq2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.eq) +} +__CUDA_BF16_DECL__ bool __hbne2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.ne) +} +__CUDA_BF16_DECL__ bool __hble2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.le) +} +__CUDA_BF16_DECL__ bool __hbge2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.ge) +} +__CUDA_BF16_DECL__ bool __hblt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.lt) +} +__CUDA_BF16_DECL__ bool __hbgt2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.gt) +} +__CUDA_BF16_DECL__ bool __hbequ2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.equ) +} +__CUDA_BF16_DECL__ bool __hbneu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.neu) +} +__CUDA_BF16_DECL__ bool __hbleu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.leu) +} +__CUDA_BF16_DECL__ bool __hbgeu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.geu) +} +__CUDA_BF16_DECL__ bool __hbltu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.ltu) +} +__CUDA_BF16_DECL__ bool __hbgtu2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __BOOL_COMPARISON_OP_BFLOAT162_MACRO(set.gtu) +} +#undef __BOOL_COMPARISON_OP_BFLOAT162_MACRO +/****************************************************************************** +* __nv_bfloat16 comparison * +******************************************************************************/ +#define __COMPARISON_OP_BFLOAT16_MACRO(name) {\ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90,\ + unsigned short val; \ + asm( "{ .reg .pred __$temp3;\n" \ + " setp." __CUDA_BF16_STRINGIFY(name) ".bf16 __$temp3, %1, %2;\n" \ + " selp.u16 %0, 1, 0, __$temp3;}" \ + : "=h"(val) : "h"(__BFLOAT16_TO_CUS(a)), "h"(__BFLOAT16_TO_CUS(b))); \ + return (val != 0U) ? true : false; \ +,\ + unsigned int val; \ + asm( "{.reg .b32 a,b;\n"\ + " mov.b32 a, {0, %1};\n"\ + " mov.b32 b, {0, %2};\n"\ + " set." __CUDA_BF16_STRINGIFY(name) ".f32.f32 %0, a, b;}\n"\ + :"=r"(val) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); \ + return (val != 0U) ? 
true : false; \ +)\ +} +__CUDA_BF16_DECL__ bool __heq(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(eq) +} +__CUDA_BF16_DECL__ bool __hne(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(ne) +} +__CUDA_BF16_DECL__ bool __hle(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(le) +} +__CUDA_BF16_DECL__ bool __hge(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(ge) +} +__CUDA_BF16_DECL__ bool __hlt(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(lt) +} +__CUDA_BF16_DECL__ bool __hgt(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(gt) +} +__CUDA_BF16_DECL__ bool __hequ(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(equ) +} +__CUDA_BF16_DECL__ bool __hneu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(neu) +} +__CUDA_BF16_DECL__ bool __hleu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(leu) +} +__CUDA_BF16_DECL__ bool __hgeu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(geu) +} +__CUDA_BF16_DECL__ bool __hltu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(ltu) +} +__CUDA_BF16_DECL__ bool __hgtu(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __COMPARISON_OP_BFLOAT16_MACRO(gtu) +} +#undef __COMPARISON_OP_BFLOAT16_MACRO +/****************************************************************************** +* __nv_bfloat162 arithmetic * +******************************************************************************/ +#define __BINARY_OP_BFLOAT162_MACRO(name) /* do */ {\ + __nv_bfloat162 val; \ + asm( "{.reg .b32 low_a,low_b,high_a,high_b,high_res,low_res;\n"\ + " .reg .b16 low,high;\n"\ + " and.b32 high_a, %1, 0xffff0000U;\n"\ + " and.b32 high_b, %2, 0xffff0000U;\n"\ + " shl.b32 low_a, %1, 16;\n"\ + " shl.b32 low_b, %2, 16;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32 low_res, low_a, low_b;\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32 high_res, high_a, high_b;\n"\ + " cvt.rn.bf16.f32 low, low_res;\n"\ + " cvt.rn.bf16.f32 high, high_res;\n"\ + " mov.b32 %0, {low,high};}\n"\ + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); \ + return val; \ +} /* while(0) */ + +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x3f803f80U;\n" + " fma.rn.bf16x2 %0,%1,c,%2;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0xbf80bf80U;\n" + " fma.rn.bf16x2 %0,%2,c,%1;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.bf16x2 %0,%1,%2; 
}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x80008000U;\n" + " fma.rn.bf16x2 %0,%1,%2,c;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.rn.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x3f803f80U;\n" + " fma.rn.bf16x2 %0,%1,c,%2;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.rn.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0xbf80bf80U;\n" + " fma.rn.bf16x2 %0,%2,c,%1;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2_rn(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.rn.bf16x2 %0,%1,%2; }\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +, + asm( "{.reg .b32 c;\n" + " mov.b32 c, 0x80008000U;\n" + " fma.rn.bf16x2 %0,%1,%2,c;}\n" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hadd2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{.reg .b32 f, one, zero;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " fma.rn.bf16x2 f,%1,one,%2;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hsub2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{.reg .b32 f, one, zero, mone;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " mov.b32 mone, 0xbf80bf80U;\n" + " fma.rn.bf16x2 f,%2,mone,%1;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmul2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{.reg .b32 f, one, zero, mzero;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " mov.b32 mzero, 0x80008000U;\n" + " fma.rn.bf16x2 f,%1,%2,mzero;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + __nv_bfloat162 val; + asm( "{fma.rn.bf16x2 %0,%1,%2,%3;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b)),"r"(__BFLOAT162_TO_CUI(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2_sat(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + __nv_bfloat162 val; + 
asm( "{ .reg .b32 f, one, zero;\n" + " mov.b32 one, 0x3f803f80U;\n" + " mov.b32 zero, 0;\n" + " fma.rn.bf16x2 f, %1, %2, %3;\n" + " max.bf16x2 f, f, zero;\n" + " min.bf16x2 %0, f, one;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b)),"r"(__BFLOAT162_TO_CUI(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __h2div(const __nv_bfloat162 a, const __nv_bfloat162 b) { + __nv_bfloat16 ha, hb; + + ha = __low2bfloat16(a); + hb = __low2bfloat16(b); + + const __nv_bfloat16 v1 = __hdiv(ha, hb); + + ha = __high2bfloat16(a); + hb = __high2bfloat16(b); + + const __nv_bfloat16 v2 = __hdiv(ha, hb); + + return __halves2bfloat162(v1, v2); +} +/****************************************************************************** +* __nv_bfloat16 arithmetic * +******************************************************************************/ +#define __BINARY_OP_BFLOAT16_MACRO(name) /* do */ {\ + __nv_bfloat16 val; \ + asm( "{.reg .b32 a,b,res;\n"\ + " mov.b32 a, {0,%1};\n"\ + " mov.b32 b, {0,%2};\n"\ + " " __CUDA_BF16_STRINGIFY(name) ".f32 res, a, b;\n"\ + " cvt.rn.bf16.f32 %0, res;}\n"\ + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); \ + return val; \ +} /* while(0) */ + +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x3f80U;\n" + " fma.rn.bf16 %0,%1,c,%2;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0xbf80U;\n" + " fma.rn.bf16 %0,%2,c,%1;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x8000U;\n" + " fma.rn.bf16 %0,%1,%2,c;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd_rn(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ add.rn.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x3f80U;\n" + " fma.rn.bf16 %0,%1,c,%2;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub_rn(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ sub.rn.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0xbf80U;\n" + " fma.rn.bf16 %0,%2,c,%1;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : 
"h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul_rn(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + asm( "{ mul.rn.bf16 %0,%1,%2; }\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +, + asm( "{.reg .b16 c;\n" + " mov.b16 c, 0x8000U;\n" + " fma.rn.bf16 %0,%1,%2,c;}\n" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); +) + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hadd_sat(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " fma.rn.bf16 f, %1, one, %2;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hsub_sat(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero, mone;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " mov.b16 mone, 0xbf80U;\n" + " fma.rn.bf16 f, %2, mone, %1;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmul_sat(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero, mzero;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " mov.b16 mzero, 0x8000U;\n" + " fma.rn.bf16 f, %1, %2, mzero;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c) +{ + __nv_bfloat16 val; + asm( "{fma.rn.bf16 %0,%1,%2,%3;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b)),"h"(__BFLOAT16_TO_CUS(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma_sat(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c) +{ + __nv_bfloat16 val; + asm( "{ .reg .b16 f, one, zero;\n" + " mov.b16 one, 0x3f80U;\n" + " mov.b16 zero, 0;\n" + " fma.rn.bf16 f, %1, %2, %3;\n" + " max.bf16 f, f, zero;\n" + " min.bf16 %0, f, one;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b)),"h"(__BFLOAT16_TO_CUS(c))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hdiv(const __nv_bfloat16 a, const __nv_bfloat16 b) { + __BINARY_OP_BFLOAT16_MACRO(div.rn) +} + +/****************************************************************************** +* __nv_bfloat162 functions * +******************************************************************************/ +#define __APPROX_FCAST(fun) /* do */ {\ + __nv_bfloat16 val;\ + asm("{.reg.b32 f; \n"\ + " .reg.b16 r; \n"\ + " mov.b16 r,%1; \n"\ + " mov.b32 f,{0,r}; \n"\ + " " __CUDA_BF16_STRINGIFY(fun) ".approx.f32 f,f; \n"\ + " cvt.rn.bf16.f32 r,f; \n"\ + " mov.b16 %0,r; \n"\ + "}": "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)));\ + return val;\ +} /* while(0) */ +#define __APPROX_FCAST2(fun) /* do */ {\ + __nv_bfloat162 val;\ + asm("{.reg.b16 hl, hu; \n"\ + " .reg.b32 fl, fu; \n"\ + " mov.b32 {hl, hu}, %1; \n"\ + " mov.b32 fl, {0,hl}; \n"\ + " mov.b32 fu, {0,hu}; \n"\ + " " __CUDA_BF16_STRINGIFY(fun) ".approx.f32 fl, fl; \n"\ + " " 
__CUDA_BF16_STRINGIFY(fun) ".approx.f32 fu, fu; \n"\ + " cvt.rn.bf16.f32 hl, fl; \n"\ + " cvt.rn.bf16.f32 hu, fu; \n"\ + " mov.b32 %0, {hl, hu}; \n"\ + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); \ + return val;\ +} /* while(0) */ +__CUDA_BF16_DECL__ __nv_bfloat16 __hsin_internal(const __nv_bfloat16 a) { + float f = __bfloat162float(a); + f = sinf(f); + return __float2bfloat16_rn(f); +} +__CUDA_BF16_DECL__ __nv_bfloat16 hsin(const __nv_bfloat16 a) { + return __hsin_internal(a); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2sin(const __nv_bfloat162 a) { + const __nv_bfloat16 l = __low2bfloat16(a); + const __nv_bfloat16 h = __high2bfloat16(a); + return __halves2bfloat162(__hsin_internal(l), __hsin_internal(h)); +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hcos_internal(const __nv_bfloat16 a) { + float f = __bfloat162float(a); + f = cosf(f); + return __float2bfloat16_rn(f); +} +__CUDA_BF16_DECL__ __nv_bfloat16 hcos(const __nv_bfloat16 a) { + return __hcos_internal(a); +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2cos(const __nv_bfloat162 a) { + const __nv_bfloat16 l = __low2bfloat16(a); + const __nv_bfloat16 h = __high2bfloat16(a); + return __halves2bfloat162(__hcos_internal(l), __hcos_internal(h)); +} + +#define __BF16_SPEC_CASE2(i,r, spc, ulp) \ + "{.reg.b32 spc, ulp, p;\n"\ + " mov.b32 spc," __CUDA_BF16_STRINGIFY(spc) ";\n"\ + " mov.b32 ulp," __CUDA_BF16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16x2.f16x2 p," __CUDA_BF16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.bf16x2 " __CUDA_BF16_STRINGIFY(r) ",p,ulp," __CUDA_BF16_STRINGIFY(r) ";\n}\n" +#define __BF16_SPEC_CASE(i,r, spc, ulp) \ + "{.reg.b16 spc, ulp, p;\n"\ + " mov.b16 spc," __CUDA_BF16_STRINGIFY(spc) ";\n"\ + " mov.b16 ulp," __CUDA_BF16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16.f16 p," __CUDA_BF16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.bf16 " __CUDA_BF16_STRINGIFY(r) ",p,ulp," __CUDA_BF16_STRINGIFY(r) ";\n}\n" + +__CUDA_BF16_DECL__ __nv_bfloat16 hexp(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 h,r; \n" + " mov.b16 h,%1; \n" + " mov.b32 f,{0,h}; \n" + " mov.b32 C, 0x3FB8AA3CU; \n" + " mul.f32 f,f,C; \n" + " ex2.approx.f32 f,f; \n" + " cvt.rn.bf16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu, C; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " mov.b32 C, 0x3FB8AA3CU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 hexp2(const __nv_bfloat16 a) { + __APPROX_FCAST(ex2) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2exp2(const __nv_bfloat162 a) { + __APPROX_FCAST2(ex2) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hexp10(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " mov.b32 f, {0,h}; \n" + " mov.b32 C, 0x40549A78U; \n" + " mul.f32 f,f,C; \n" + " ex2.approx.f32 f, f; \n" + " cvt.rn.bf16.f32 r, f; \n" + __BF16_SPEC_CASE(%1, r, 0xBC95U,0xBF00U) + " mov.b16 %0, r; \n" + "}":"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 
h2exp10(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu, C; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " mov.b32 C, 0x40549A78U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __BF16_SPEC_CASE2(%1, r, 0xBC95BC95U,0xBF00BF00U) + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 hlog2(const __nv_bfloat16 a) { + __APPROX_FCAST(lg2) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2log2(const __nv_bfloat162 a) { + __APPROX_FCAST2(lg2) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hlog(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 r,h; \n" + " mov.b16 h,%1; \n" + " mov.b32 f,{0,h}; \n" + " lg2.approx.f32 f,f; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.bf16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2log(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 hlog10(const __nv_bfloat16 a) { + __nv_bfloat16 val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " mov.b32 f, {0,h}; \n" + " lg2.approx.f32 f, f; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.bf16.f32 r, f; \n" + " mov.b16 %0, r; \n" + "}":"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2log10(const __nv_bfloat162 a) { + __nv_bfloat162 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " mov.b32 fl, {0,hl}; \n" + " mov.b32 fu, {0,hu}; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.bf16.f32 hl, fl; \n" + " cvt.rn.bf16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + " mov.b32 %0, r; \n" + "}":"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a))); + return val; +} +#undef __BF16_SPEC_CASE2 +#undef __BF16_SPEC_CASE +__CUDA_BF16_DECL__ __nv_bfloat162 h2rcp(const __nv_bfloat162 a) { + __APPROX_FCAST2(rcp) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hrcp(const __nv_bfloat16 a) { + __APPROX_FCAST(rcp) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2rsqrt(const __nv_bfloat162 a) { + __APPROX_FCAST2(rsqrt) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hrsqrt(const __nv_bfloat16 a) { + __APPROX_FCAST(rsqrt) +} +__CUDA_BF16_DECL__ __nv_bfloat162 h2sqrt(const __nv_bfloat162 a) { + __APPROX_FCAST2(sqrt) +} +__CUDA_BF16_DECL__ __nv_bfloat16 hsqrt(const __nv_bfloat16 a) { + __APPROX_FCAST(sqrt) +} +#undef __APPROX_FCAST +#undef __APPROX_FCAST2 +__CUDA_BF16_DECL__ __nv_bfloat162 __hisnan2(const __nv_bfloat162 a) +{ 
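+    // per-lane NaN test: each result lane is 1.0 when the corresponding input lane is NaN, 0.0 otherwise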
+NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat162 r; + asm("{set.nan.bf16x2.bf16x2 %0,%1,%1;\n}" + :"=r"(__BFLOAT162_TO_UI(r)) : "r"(__BFLOAT162_TO_CUI(a))); + return r; +, + const __nv_bfloat162 b = a; + __BINARY_OP_BFLOAT162_MACRO(set.nan.f32) +) +} +__CUDA_BF16_DECL__ bool __hisnan(const __nv_bfloat16 a) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm("{set.nan.bf16.bf16 %0,%1,%1;\n}" + :"=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(a))); + return __BFLOAT16_TO_CUS(r) != 0U; +, + unsigned int r; + asm( "{.reg .b32 a;\n" + " mov.b32 a, {0,%1};\n" + " set.nan.f32.f32 %0, a, a;}\n" + :"=r"(r) : "h"(__BFLOAT16_TO_CUS(a))); + return r != 0U; +) +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hneg2(const __nv_bfloat162 a) +{ + __nv_bfloat162 r; + asm("{neg.bf16x2 %0,%1;\n}" + :"=r"(__BFLOAT162_TO_UI(r)) : "r"(__BFLOAT162_TO_CUI(a))); + return r; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hneg(const __nv_bfloat16 a) +{ + __nv_bfloat16 r; + asm("{neg.bf16 %0,%1;\n}" + :"=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(a))); + return r; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __habs2(const __nv_bfloat162 a) +{ + __nv_bfloat162 r; + asm("{abs.bf16x2 %0,%1;\n}" + :"=r"(__BFLOAT162_TO_UI(r)) : "r"(__BFLOAT162_TO_CUI(a))); + return r; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __habs(const __nv_bfloat16 a) +{ + __nv_bfloat16 r; + asm("{abs.bf16 %0,%1;\n}" + :"=h"(__BFLOAT16_TO_US(r)) : "h"(__BFLOAT16_TO_CUS(a))); + return r; +} +/****************************************************************************** +* __nv_bfloat16 arithmetic * +******************************************************************************/ +__CUDA_BF16_DECL__ __nv_bfloat16 __hmax(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ max.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmin(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ min.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmax_nan(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ max.NaN.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hmin_nan(const __nv_bfloat16 a, const __nv_bfloat16 b) +{ + __nv_bfloat16 val; + asm( "{ min.NaN.bf16 %0,%1,%2;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat16 __hfma_relu(const __nv_bfloat16 a, const __nv_bfloat16 b, const __nv_bfloat16 c) +{ + __nv_bfloat16 val; + asm( "{ fma.rn.relu.bf16 %0,%1,%2,%3;\n}" + :"=h"(__BFLOAT16_TO_US(val)) : "h"(__BFLOAT16_TO_CUS(a)),"h"(__BFLOAT16_TO_CUS(b)),"h"(__BFLOAT16_TO_CUS(c))); + return val; +} +/****************************************************************************** +* __nv_bfloat162 arithmetic * +******************************************************************************/ +__CUDA_BF16_DECL__ __nv_bfloat162 __hmax2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ max.bf16x2 %0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmin2(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ min.bf16x2 
%0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmax2_nan(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ max.NaN.bf16x2 %0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hmin2_nan(const __nv_bfloat162 a, const __nv_bfloat162 b) +{ + __nv_bfloat162 val; + asm( "{ min.NaN.bf16x2 %0,%1,%2;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b))); + return val; +} +__CUDA_BF16_DECL__ __nv_bfloat162 __hfma2_relu(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + __nv_bfloat162 val; + asm( "{ fma.rn.relu.bf16x2 %0,%1,%2,%3;\n}" + :"=r"(__BFLOAT162_TO_UI(val)) : "r"(__BFLOAT162_TO_CUI(a)),"r"(__BFLOAT162_TO_CUI(b)),"r"(__BFLOAT162_TO_CUI(c))); + return val; +} + +__CUDA_BF16_DECL__ __nv_bfloat162 __hcmadd(const __nv_bfloat162 a, const __nv_bfloat162 b, const __nv_bfloat162 c) +{ + // fast version of complex multiply-accumulate + // (a.re, a.im) * (b.re, b.im) + (c.re, c.im) + // acc.re = (c.re + a.re*b.re) - a.im*b.im + // acc.im = (c.im + a.re*b.im) + a.im*b.re + __nv_bfloat16 real_tmp = __hfma(a.x, b.x, c.x); + __nv_bfloat16 img_tmp = __hfma(a.x, b.y, c.y); + real_tmp = __hfma(__hneg(a.y), b.y, real_tmp); + img_tmp = __hfma(a.y, b.x, img_tmp); + return make_bfloat162(real_tmp, img_tmp); +} + + +/* Define __PTR for atomicAdd prototypes below, undef after done */ +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __PTR "l" +#else +#define __PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ + +__CUDA_BF16_DECL__ __nv_bfloat162 atomicAdd(__nv_bfloat162 *const address, const __nv_bfloat162 val) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat162 r; + asm volatile ("{ atom.add.noftz.bf16x2 %0,[%1],%2; }\n" + : "=r"(__BFLOAT162_TO_UI(r)) : __PTR(address), "r"(__BFLOAT162_TO_CUI(val)) + : "memory"); + return r; +, + unsigned int* address_as_uint = (unsigned int*)address; + unsigned int old = *address_as_uint; + unsigned int assumed; + do { + assumed = old; + __nv_bfloat162 new_val = __hadd2(val, *(__nv_bfloat162*)&assumed); + old = atomicCAS(address_as_uint, assumed, *(unsigned int*)&new_val); + } while (assumed != old); + return *(__nv_bfloat162*)&old; +) +} + +__CUDA_BF16_DECL__ __nv_bfloat16 atomicAdd(__nv_bfloat16 *const address, const __nv_bfloat16 val) +{ +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_90, + __nv_bfloat16 r; + asm volatile ("{ atom.add.noftz.bf16 %0,[%1],%2; }\n" + : "=h"(__BFLOAT16_TO_US(r)) + : __PTR(address), "h"(__BFLOAT16_TO_CUS(val)) + : "memory"); + return r; +, + unsigned short int* address_as_us = (unsigned short int*)address; + unsigned short int old = *address_as_us; + unsigned short int assumed; + do { + assumed = old; + old = atomicCAS(address_as_us, assumed, + __bfloat16_as_ushort(__hadd(val, __ushort_as_bfloat16(assumed)))); + } while (assumed != old); + return __ushort_as_bfloat16(old); +) +} + +#undef __PTR +#undef __CUDA_BF16_DECL__ +#endif /* (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) || defined(_NVHPC_CUDA) */ +#endif /* defined(__cplusplus) */ + +#undef __BINARY_OP_BFLOAT162_MACRO +#undef __BINARY_OP_BFLOAT16_MACRO + +#undef __CUDA_HOSTDEVICE_BF16_DECL__ +#undef __CUDA_BF16_DECL__ + +/* Define first-class types 
"nv_bfloat16" and "nv_bfloat162", unless user specifies otherwise via "#define CUDA_NO_BFLOAT16" */ +/* C cannot ever have these types defined here, because __nv_bfloat16 and __nv_bfloat162 are C++ classes */ +#if defined(__cplusplus) && !defined(CUDA_NO_BFLOAT16) +typedef __nv_bfloat16 nv_bfloat16; +typedef __nv_bfloat162 nv_bfloat162; + +#endif /* defined(__cplusplus) && !defined(CUDA_NO_BFLOAT16) */ + +#if defined(__CPP_VERSION_AT_LEAST_11_BF16) +#undef __CPP_VERSION_AT_LEAST_11_BF16 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_BF16) */ + +#endif /* end of include guard: __CUDA_BF16_HPP__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2730d51bf8c4b0931b8bbfc3c9a0ff0c72694725 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h @@ -0,0 +1,735 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_DEVICE_RUNTIME_API_H__) +#define __CUDA_DEVICE_RUNTIME_API_H__ + +#if defined(__CUDACC__) && !defined(__CUDACC_RTC__) +#include +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(CUDA_FORCE_CDP1_IF_SUPPORTED) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_NVHPC_CUDA) && !(defined(_WIN32) && !defined(_WIN64)) +#define __CUDA_INTERNAL_USE_CDP2 +#endif + +#if !defined(__CUDACC_RTC__) + +#if !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__) + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudaFuncAttributes; + + +#ifndef __CUDA_INTERNAL_USE_CDP2 +inline __device__ cudaError_t CUDARTAPI cudaMalloc(void **p, size_t s) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *p, const void *c) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaGetDevice(int *device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags) +{ + return cudaErrorUnknown; +} +#else // __CUDA_INTERNAL_USE_CDP2 +inline __device__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **p, size_t s) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *p, const void *c) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize) +{ + return cudaErrorUnknown; +} + +inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags) +{ + return cudaErrorUnknown; +} +#endif // __CUDA_INTERNAL_USE_CDP2 + + +#if defined(__cplusplus) +} +#endif + +#endif /* !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__) */ + +#endif /* !defined(__CUDACC_RTC__) */ + +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +# define __DEPRECATED__(msg) +#elif defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 
|| (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(__CUDA_ARCH__) && !defined(__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING) +# define __CDPRT_DEPRECATED(func_name) __DEPRECATED__("Use of "#func_name" from device code is deprecated. Moreover, such use will cause this module to fail to load on sm_90+ devices. If calls to "#func_name" from device code cannot be removed for older devices at this time, you may guard them with __CUDA_ARCH__ macros to remove them only for sm_90+ devices, making sure to generate code for compute_90 for the macros to take effect. Note that this mitigation will no longer work when support for "#func_name" from device code is eventually dropped for all devices. Disable this warning with -D__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING.") +#else +# define __CDPRT_DEPRECATED(func_name) +#endif + +#if defined(__cplusplus) && defined(__CUDACC__) /* Visible to nvcc front-end only */ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350) // Visible to SM>=3.5 and "__host__ __device__" only + +#include "driver_types.h" +#include "crt/host_defines.h" + +#define cudaStreamGraphTailLaunch (cudaStream_t)0x0100000000000000 +#define cudaStreamGraphFireAndForget (cudaStream_t)0x0200000000000000 + +#ifdef __CUDA_INTERNAL_USE_CDP2 +#define cudaStreamTailLaunch ((cudaStream_t)0x3) /**< Per-grid stream with a tail launch semantics. Only applicable when used with CUDA Dynamic Parallelism. */ +#define cudaStreamFireAndForget ((cudaStream_t)0x4) /**< Per-grid stream with a fire-and-forget synchronization behavior. Only applicable when used with CUDA Dynamic Parallelism. */ +#endif + +extern "C" +{ + +// Symbols beginning with __cudaCDP* should not be used outside +// this header file. Instead, compile with -DCUDA_FORCE_CDP1_IF_SUPPORTED if +// CDP1 support is required.
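A minimal usage sketch for the two special per-grid streams defined above, assuming CUDA 12 dynamic parallelism with relocatable device code (e.g. `nvcc -rdc=true -arch=sm_90` or another CDP-capable target); the kernel names are hypothetical:

```cuda
#include <cstdio>

__global__ void child(int tag)
{
    if (threadIdx.x == 0)
        printf("child %d running\n", tag);
}

__global__ void parent(void)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        // Fire-and-forget: the child grid may run concurrently with the
        // remainder of the parent grid.
        child<<<1, 32, 0, cudaStreamFireAndForget>>>(1);

        // Tail launch: the child grid starts only after the parent grid
        // has completed.
        child<<<1, 32, 0, cudaStreamTailLaunch>>>(2);

        // Device-side error reporting uses the same names as the host API;
        // with CDP2 these calls resolve to the __cudaCDP2* entry points
        // declared later in this header.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("device-side launch failed: %s\n", cudaGetErrorString(err));
    }
}
```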
+ +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaDeviceSynchronizeDeprecationAvoidance(void); + +#ifndef __CUDA_INTERNAL_USE_CDP2 +//// CDP1 endpoints +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig); +#if (__CUDA_ARCH__ < 900) && (defined(CUDA_FORCE_CDP1_IF_SUPPORTED) || (defined(_WIN32) && !defined(_WIN64))) +// cudaDeviceSynchronize is removed on sm_90+ +extern __device__ __cudart_builtin__ __CDPRT_DEPRECATED(cudaDeviceSynchronize) cudaError_t CUDARTAPI cudaDeviceSynchronize(void); +#endif +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void); +extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error); +extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const 
void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags); +#endif // __CUDA_INTERNAL_USE_CDP2 + +//// CDP2 endpoints +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetLimit(size_t *pValue, enum cudaLimit limit); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetLastError(void); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2PeekAtLastError(void); +extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorString(cudaError_t error); +extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorName(cudaError_t error); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDeviceCount(int *count); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamDestroy(cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI 
__cudaCDP2StreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventCreateWithFlags(cudaEvent_t *event, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord_ptsz(cudaEvent_t event, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventDestroy(cudaEvent_t event); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *attr, const void *func); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Free(void *devPtr); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **devPtr, size_t size); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2RuntimeGetVersion(int 
*runtimeVersion); +extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBuffer(size_t alignment, size_t size); +extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2(void *parameterBuffer, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags); + + +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream); +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) +static inline __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch_ptsz(cudaGraphExec_t graphExec, cudaStream_t stream) +{ + if (stream == 0) { + stream = cudaStreamPerThread; + } + return cudaGraphLaunch(graphExec, stream); +} +#endif + +/** + * \ingroup CUDART_GRAPH + * \brief Get the currently running device graph id. + * + * Get the currently running device graph id. + * \return Returns the current device graph id, 0 if the call is outside of a device graph. + * \sa cudaLaunchDevice + */ +static inline __device__ __cudart_builtin__ cudaGraphExec_t CUDARTAPI cudaGetCurrentGraphExec(void) +{ + unsigned long long current_graph_exec; + asm ("mov.u64 %0, %%current_graph_exec;" : "=l"(current_graph_exec)); + return (cudaGraphExec_t)current_graph_exec; +} + +/** + * \ingroup CUDART_EXECUTION + * \brief Programmatic dependency trigger + * + * This device function ensures the programmatic launch completion edges / + * events are fulfilled. See + * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization + * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more + * information. The event / edge kick off only happens when every CTAs + * in the grid has either exited or called this function at least once, + * otherwise the kick off happens automatically after all warps finishes + * execution but before the grid completes. The kick off only enables + * scheduling of the secondary kernel. It provides no memory visibility + * guarantee itself. The user could enforce memory visibility by inserting a + * memory fence of the correct scope. + */ +static inline __device__ __cudart_builtin__ void CUDARTAPI cudaTriggerProgrammaticLaunchCompletion(void) +{ + asm volatile("griddepcontrol.launch_dependents;":::); +} + +/** + * \ingroup CUDART_EXECUTION + * \brief Programmatic grid dependency synchronization + * + * This device function will block the thread until all direct grid + * dependencies have completed. 
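A minimal sketch of how this intrinsic pairs with cudaTriggerProgrammaticLaunchCompletion, assuming an architecture with programmatic dependent launch support (sm_90 class) and a host that launches the secondary grid with cudaLaunchAttributeProgrammaticStreamSerialization or a programmatic event; the kernel and buffer names are hypothetical:

```cuda
__device__ float stage[1 << 20];   // hypothetical staging buffer

__global__ void primary(void)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (1u << 20))
        stage[i] = static_cast<float>(i);

    // Make the writes visible, then signal that dependent grids may be
    // scheduled; the fence is needed because the trigger itself gives no
    // memory-visibility guarantee (see the documentation above).
    __threadfence();
    cudaTriggerProgrammaticLaunchCompletion();
}

__global__ void secondary(void)
{
    // Block until all direct grid dependencies have completed their
    // programmatic launch-completion edges, then consume their output.
    cudaGridDependencySynchronize();

    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (1u << 20))
        stage[i] += 1.0f;
}
```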
This API is intended to use in conjuncture with + * programmatic / launch event / dependency. See + * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization + * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more + * information. + */ +static inline __device__ __cudart_builtin__ void CUDARTAPI cudaGridDependencySynchronize(void) +{ + asm volatile("griddepcontrol.wait;":::"memory"); +} + + +//// CG API +extern __device__ __cudart_builtin__ unsigned long long CUDARTAPI cudaCGGetIntrinsicHandle(enum cudaCGScope scope); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronize(unsigned long long handle, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronizeGrid(unsigned long long handle, unsigned int flags); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetSize(unsigned int *numThreads, unsigned int *numGrids, unsigned long long handle); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetRank(unsigned int *threadRank, unsigned int *gridRank, unsigned long long handle); + + +//// CDP API + +#ifdef __CUDA_ARCH__ + +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device) +{ + return __cudaCDP2DeviceGetAttribute(value, attr, device); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit) +{ + return __cudaCDP2DeviceGetLimit(pValue, limit); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig) +{ + return __cudaCDP2DeviceGetCacheConfig(pCacheConfig); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig) +{ + return __cudaCDP2DeviceGetSharedMemConfig(pConfig); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void) +{ + return __cudaCDP2GetLastError(); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void) +{ + return __cudaCDP2PeekAtLastError(); +} + +static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error) +{ + return __cudaCDP2GetErrorString(error); +} + +static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error) +{ + return __cudaCDP2GetErrorName(error); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count) +{ + return __cudaCDP2GetDeviceCount(count); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device) +{ + return __cudaCDP2GetDevice(device); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags) +{ + return __cudaCDP2StreamCreateWithFlags(pStream, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream) +{ + return __cudaCDP2StreamDestroy(stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags) +{ + return __cudaCDP2StreamWaitEvent(stream, event, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI 
cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags) +{ + return __cudaCDP2StreamWaitEvent_ptsz(stream, event, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags) +{ + return __cudaCDP2EventCreateWithFlags(event, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream) +{ + return __cudaCDP2EventRecord(event, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream) +{ + return __cudaCDP2EventRecord_ptsz(event, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags) +{ + return __cudaCDP2EventRecordWithFlags(event, stream, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags) +{ + return __cudaCDP2EventRecordWithFlags_ptsz(event, stream, flags); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event) +{ + return __cudaCDP2EventDestroy(event); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func) +{ + return __cudaCDP2FuncGetAttributes(attr, func); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr) +{ + return __cudaCDP2Free(devPtr); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size) +{ + return __cudaCDP2Malloc(devPtr, size); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2MemcpyAsync(dst, src, count, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2MemcpyAsync_ptsz(dst, src, count, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2Memcpy2DAsync(dst, dpitch, src, spitch, width, height, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream) +{ + return __cudaCDP2Memcpy2DAsync_ptsz(dst, dpitch, src, spitch, width, height, kind, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream) +{ + return __cudaCDP2Memcpy3DAsync(p, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream) +{ + return __cudaCDP2Memcpy3DAsync_ptsz(p, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream) +{ 
+ return __cudaCDP2MemsetAsync(devPtr, value, count, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream) +{ + return __cudaCDP2MemsetAsync_ptsz(devPtr, value, count, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream) +{ + return __cudaCDP2Memset2DAsync(devPtr, pitch, value, width, height, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream) +{ + return __cudaCDP2Memset2DAsync_ptsz(devPtr, pitch, value, width, height, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream) +{ + return __cudaCDP2Memset3DAsync(pitchedDevPtr, value, extent, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream) +{ + return __cudaCDP2Memset3DAsync_ptsz(pitchedDevPtr, value, extent, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion) +{ + return __cudaCDP2RuntimeGetVersion(runtimeVersion); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize) +{ + return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSmemSize); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags) +{ + return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSmemSize, flags); +} +#endif // __CUDA_INTERNAL_USE_CDP2 + +#endif // __CUDA_ARCH__ + + +/** + * \ingroup CUDART_EXECUTION + * \brief Obtains a parameter buffer + * + * Obtains a parameter buffer which can be filled with parameters for a kernel launch. + * Parameters passed to ::cudaLaunchDevice must be allocated via this function. + * + * This is a low level API and can only be accessed from Parallel Thread Execution (PTX). + * CUDA user code should use <<< >>> to launch kernels. + * + * \param alignment - Specifies alignment requirement of the parameter buffer + * \param size - Specifies size requirement in bytes + * + * \return + * Returns pointer to the allocated parameterBuffer + * \notefnerr + * + * \sa cudaLaunchDevice + */ +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size) +{ + return __cudaCDP2GetParameterBuffer(alignment, size); +} +#else +extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size); +#endif + + +/** + * \ingroup CUDART_EXECUTION + * \brief Launches a specified kernel + * + * Launches a specified kernel with the specified parameter buffer. A parameter buffer can be obtained + * by calling ::cudaGetParameterBuffer(). 
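A rough sketch of this low-level launch path, assuming a single int kernel parameter (placed at offset 0 of the parameter buffer) and hypothetical kernel names; real CUDA C++ device code would normally write the equivalent <<< >>> launch and let nvcc emit these calls via the V2 entry points declared just below:

```cuda
__global__ void worker(int value)
{
    // hypothetical kernel with one int parameter
    (void)value;
}

__global__ void launcher(void)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        // High-level equivalent: worker<<<4, 128, 0, cudaStreamTailLaunch>>>(7);
        void *buf = cudaGetParameterBufferV2((void *)worker,
                                             dim3(4), dim3(128),
                                             0 /* dynamic shared memory */);
        if (buf != nullptr) {
            *static_cast<int *>(buf) = 7;   // first (and only) parameter at offset 0
            cudaLaunchDeviceV2(buf, cudaStreamTailLaunch);
        }
    }
}
```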
+ * + * This is a low level API and can only be accessed from Parallel Thread Execution (PTX). + * CUDA user code should use <<< >>> to launch the kernels. + * + * \param func - Pointer to the kernel to be launched + * \param parameterBuffer - Holds the parameters to the launched kernel. parameterBuffer can be NULL. (Optional) + * \param gridDimension - Specifies grid dimensions + * \param blockDimension - Specifies block dimensions + * \param sharedMemSize - Specifies size of shared memory + * \param stream - Specifies the stream to be used + * + * \return + * ::cudaSuccess, ::cudaErrorInvalidDevice, ::cudaErrorLaunchMaxDepthExceeded, ::cudaErrorInvalidConfiguration, + * ::cudaErrorStartupFailure, ::cudaErrorLaunchPendingCountExceeded, ::cudaErrorLaunchOutOfResources + * \notefnerr + * \n Please refer to Execution Configuration and Parameter Buffer Layout from the CUDA Programming + * Guide for the detailed descriptions of launch configuration and parameter layout respectively. + * + * \sa cudaGetParameterBuffer + */ +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize) +{ + return __cudaCDP2GetParameterBufferV2(func, gridDimension, blockDimension, sharedMemSize); +} +#else +extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize); +#endif + + +#ifdef __CUDA_INTERNAL_USE_CDP2 +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream) +{ + return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); +} + +static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream) +{ + return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream); +} +#else +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream); +#endif + + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__) + // When compiling for the device and per thread default stream is enabled, add + // a static inline redirect to the per thread stream entry points. 
+ + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI + cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream) + { +#ifdef __CUDA_INTERNAL_USE_CDP2 + return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); +#else + return cudaLaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); +#endif + } + + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI + cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream) + { +#ifdef __CUDA_INTERNAL_USE_CDP2 + return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream); +#else + return cudaLaunchDeviceV2_ptsz(parameterBuffer, stream); +#endif + } +#else // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__) +#ifdef __CUDA_INTERNAL_USE_CDP2 + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream) + { + return __cudaCDP2LaunchDevice(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream); + } + + static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream) + { + return __cudaCDP2LaunchDeviceV2(parameterBuffer, stream); + } +#else // __CUDA_INTERNAL_USE_CDP2 +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream); +extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream); +#endif // __CUDA_INTERNAL_USE_CDP2 +#endif // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__) + + +// These symbols should not be used outside of this header file. 
+#define __cudaCDP2DeviceGetAttribute +#define __cudaCDP2DeviceGetLimit +#define __cudaCDP2DeviceGetCacheConfig +#define __cudaCDP2DeviceGetSharedMemConfig +#define __cudaCDP2GetLastError +#define __cudaCDP2PeekAtLastError +#define __cudaCDP2GetErrorString +#define __cudaCDP2GetErrorName +#define __cudaCDP2GetDeviceCount +#define __cudaCDP2GetDevice +#define __cudaCDP2StreamCreateWithFlags +#define __cudaCDP2StreamDestroy +#define __cudaCDP2StreamWaitEvent +#define __cudaCDP2StreamWaitEvent_ptsz +#define __cudaCDP2EventCreateWithFlags +#define __cudaCDP2EventRecord +#define __cudaCDP2EventRecord_ptsz +#define __cudaCDP2EventRecordWithFlags +#define __cudaCDP2EventRecordWithFlags_ptsz +#define __cudaCDP2EventDestroy +#define __cudaCDP2FuncGetAttributes +#define __cudaCDP2Free +#define __cudaCDP2Malloc +#define __cudaCDP2MemcpyAsync +#define __cudaCDP2MemcpyAsync_ptsz +#define __cudaCDP2Memcpy2DAsync +#define __cudaCDP2Memcpy2DAsync_ptsz +#define __cudaCDP2Memcpy3DAsync +#define __cudaCDP2Memcpy3DAsync_ptsz +#define __cudaCDP2MemsetAsync +#define __cudaCDP2MemsetAsync_ptsz +#define __cudaCDP2Memset2DAsync +#define __cudaCDP2Memset2DAsync_ptsz +#define __cudaCDP2Memset3DAsync +#define __cudaCDP2Memset3DAsync_ptsz +#define __cudaCDP2RuntimeGetVersion +#define __cudaCDP2GetParameterBuffer +#define __cudaCDP2GetParameterBufferV2 +#define __cudaCDP2LaunchDevice_ptsz +#define __cudaCDP2LaunchDeviceV2_ptsz +#define __cudaCDP2LaunchDevice +#define __cudaCDP2LaunchDeviceV2 +#define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor +#define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags + +} + +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaMalloc(T **devPtr, size_t size); +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaFuncGetAttributes(struct cudaFuncAttributes *attr, T *entry); +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize); +template static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize, unsigned int flags); + + +#endif // !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350) +#endif /* defined(__cplusplus) && defined(__CUDACC__) */ + +#undef __DEPRECATED__ +#undef __CDPRT_DEPRECATED +#undef __CUDA_INTERNAL_USE_CDP2 + +#endif /* !__CUDA_DEVICE_RUNTIME_API_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..40ab01b33e0e9bec536192676c2a804809276fc4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h @@ -0,0 +1,642 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_EGL_INTEROP_H__) +#define __CUDA_EGL_INTEROP_H__ + +#include "cuda_runtime_api.h" +#include "cuda_runtime.h" +#include "cudart_platform.h" +#include "EGL/egl.h" +#include "EGL/eglext.h" + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** + * \addtogroup CUDART_TYPES + * @{ + */ + + /** + * Maximum number of planes per frame + */ +#define CUDA_EGL_MAX_PLANES 3 + +/** + * CUDA EglFrame type - array or pointer + */ +typedef enum cudaEglFrameType_enum +{ + cudaEglFrameTypeArray = 0, /**< Frame type CUDA array */ + cudaEglFrameTypePitch = 1, /**< Frame type CUDA pointer */ +} cudaEglFrameType; + +/** + * Resource location flags- sysmem or vidmem + * + * For CUDA context on iGPU, since video and system memory are equivalent - + * these flags will not have an effect on the execution. + * + * For CUDA context on dGPU, applications can use the flag ::cudaEglResourceLocationFlags + * to give a hint about the desired location. + * + * ::cudaEglResourceLocationSysmem - the frame data is made resident on the system memory + * to be accessed by CUDA. + * + * ::cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated + * video memory to be accessed by CUDA. + * + * There may be an additional latency due to new allocation and data migration, + * if the frame is produced on a different memory. 
+ */ +typedef enum cudaEglResourceLocationFlags_enum { + cudaEglResourceLocationSysmem = 0x00, /**< Resource location sysmem */ + cudaEglResourceLocationVidmem = 0x01, /**< Resource location vidmem */ +} cudaEglResourceLocationFlags; + +/** + * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. + */ +typedef enum cudaEglColorFormat_enum { + cudaEglColorFormatYUV420Planar = 0, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420SemiPlanar = 1, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */ + cudaEglColorFormatYUV422Planar = 2, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYUV422SemiPlanar = 3, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */ + cudaEglColorFormatARGB = 6, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */ + cudaEglColorFormatRGBA = 7, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */ + cudaEglColorFormatL = 8, /**< single luminance channel in one surface. */ + cudaEglColorFormatR = 9, /**< single color channel in one surface. */ + cudaEglColorFormatYUV444Planar = 10, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYUV444SemiPlanar = 11, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */ + cudaEglColorFormatYUYV422 = 12, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */ + cudaEglColorFormatUYVY422 = 13, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */ + cudaEglColorFormatABGR = 14, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */ + cudaEglColorFormatBGRA = 15, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */ + cudaEglColorFormatA = 16, /**< Alpha color format - one channel in one surface. */ + cudaEglColorFormatRG = 17, /**< R/G color format - two channels in one surface with GR byte ordering */ + cudaEglColorFormatAYUV = 18, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */ + cudaEglColorFormatYVU444SemiPlanar = 19, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422SemiPlanar = 20, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420SemiPlanar = 21, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_444SemiPlanar = 22, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar = 23, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY12V12U12_444SemiPlanar = 24, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY12V12U12_420SemiPlanar = 25, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
*/ + cudaEglColorFormatVYUY_ER = 26, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */ + cudaEglColorFormatUYVY_ER = 27, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */ + cudaEglColorFormatYUYV_ER = 28, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */ + cudaEglColorFormatYVYU_ER = 29, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */ + cudaEglColorFormatYUVA_ER = 31, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */ + cudaEglColorFormatAYUV_ER = 32, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */ + cudaEglColorFormatYUV444Planar_ER = 33, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYUV422Planar_ER = 34, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYUV420Planar_ER = 35, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV444SemiPlanar_ER = 36, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYUV422SemiPlanar_ER = 37, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYUV420SemiPlanar_ER = 38, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU444Planar_ER = 39, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422Planar_ER = 40, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420Planar_ER = 41, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU444SemiPlanar_ER = 42, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422SemiPlanar_ER = 43, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420SemiPlanar_ER = 44, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatBayerRGGB = 45, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */ + cudaEglColorFormatBayerBGGR = 46, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */ + cudaEglColorFormatBayerGRBG = 47, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */ + cudaEglColorFormatBayerGBRG = 48, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */ + cudaEglColorFormatBayer10RGGB = 49, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer10BGGR = 50, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. 
*/ + cudaEglColorFormatBayer10GRBG = 51, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer10GBRG = 52, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer12RGGB = 53, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12BGGR = 54, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12GRBG = 55, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12GBRG = 56, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer14RGGB = 57, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer14BGGR = 58, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer14GRBG = 59, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer14GBRG = 60, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */ + cudaEglColorFormatBayer20RGGB = 61, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatBayer20BGGR = 62, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatBayer20GRBG = 63, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatBayer20GBRG = 64, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */ + cudaEglColorFormatYVU444Planar = 65, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatYVU422Planar = 66, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatYVU420Planar = 67, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatBayerIspRGGB = 68, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */ + cudaEglColorFormatBayerIspBGGR = 69, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */ + cudaEglColorFormatBayerIspGRBG = 70, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */ + cudaEglColorFormatBayerIspGBRG = 71, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. 
*/ + cudaEglColorFormatBayerBCCR = 72, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */ + cudaEglColorFormatBayerRCCB = 73, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */ + cudaEglColorFormatBayerCRBC = 74, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */ + cudaEglColorFormatBayerCBRC = 75, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */ + cudaEglColorFormatBayer10CCCC = 76, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */ + cudaEglColorFormatBayer12BCCR = 77, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12RCCB = 78, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12CRBC = 79, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12CBRC = 80, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatBayer12CCCC = 81, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */ + cudaEglColorFormatY = 82, /**< Color format for single Y plane. */ + cudaEglColorFormatYUV420SemiPlanar_2020 = 83, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420SemiPlanar_2020 = 84, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420Planar_2020 = 85, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420Planar_2020 = 86, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420SemiPlanar_709 = 87, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420SemiPlanar_709 = 88, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYUV420Planar_709 = 89, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatYVU420Planar_709 = 90, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatY10V10U10_422SemiPlanar = 94, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. 
*/ + cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */ + cudaEglColorFormatY_ER = 96, /**< Extended Range Color format for single Y plane. */ + cudaEglColorFormatY_709_ER = 97, /**< Extended Range Color format for single Y plane. */ + cudaEglColorFormatY10_ER = 98, /**< Extended Range Color format for single Y10 plane. */ + cudaEglColorFormatY10_709_ER = 99, /**< Extended Range Color format for single Y10 plane. */ + cudaEglColorFormatY12_ER = 100, /**< Extended Range Color format for single Y12 plane. */ + cudaEglColorFormatY12_709_ER = 101, /**< Extended Range Color format for single Y12 plane. */ + cudaEglColorFormatYUVA = 102, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */ + cudaEglColorFormatYVYU = 104, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */ + cudaEglColorFormatVYUY = 105, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */ + cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ + cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */ +} cudaEglColorFormat; + +/** + * CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame + */ +typedef struct cudaEglPlaneDesc_st { + unsigned int width; /**< Width of plane */ + unsigned int height; /**< Height of plane */ + unsigned int depth; /**< Depth of plane */ + unsigned int pitch; /**< Pitch of plane */ + unsigned int numChannels; /**< Number of channels for the plane */ + struct cudaChannelFormatDesc channelDesc; /**< Channel Format Descriptor */ + unsigned int reserved[4]; /**< Reserved for future use */ +} cudaEglPlaneDesc; + +/** + * CUDA EGLFrame Descriptor - structure defining one frame of EGL. + * + * Each frame may contain one or more planes depending on whether the surface is Multiplanar or not. 
+ * Each plane of EGLFrame is represented by ::cudaEglPlaneDesc which is defined as: + * \code + * typedef struct cudaEglPlaneDesc_st { + * unsigned int width; + * unsigned int height; + * unsigned int depth; + * unsigned int pitch; + * unsigned int numChannels; + * struct cudaChannelFormatDesc channelDesc; + * unsigned int reserved[4]; + * } cudaEglPlaneDesc; + * \endcode + +*/ +typedef struct cudaEglFrame_st { + union { + cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; /**< Array of CUDA arrays corresponding to each plane*/ + struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/ + } frame; + cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; /**< CUDA EGL Plane Descriptor ::cudaEglPlaneDesc*/ + unsigned int planeCount; /**< Number of planes */ + cudaEglFrameType frameType; /**< Array or Pitch */ + cudaEglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/ +} cudaEglFrame; + +/** + * CUDA EGLSream Connection + */ +typedef struct CUeglStreamConnection_st *cudaEglStreamConnection; + +/** @} */ /* END CUDART_TYPES */ + +/** + * \addtogroup CUDART_EGL EGL Interoperability + * This section describes the EGL interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Registers an EGL image + * + * Registers the EGLImageKHR specified by \p image for access by + * CUDA. A handle to the registered object is returned as \p pCudaResource. + * Additional Mapping/Unmapping is not required for the registered resource and + * ::cudaGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource. + * + * The application will be responsible for synchronizing access to shared objects. + * The application must ensure that any pending operation which access the objects have completed + * before passing control to CUDA. This may be accomplished by issuing and waiting for + * glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). + * The application will be also responsible for ensuring that any pending operation on the + * registered CUDA resource has completed prior to executing subsequent commands in other APIs + * accesing the same memory objects. + * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably). + * + * The surface's intended usage is specified using \p flags, as follows: + * + * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this + * resource will be used. It is therefore assumed that this resource will be + * read from and written to by CUDA. This is the default value. + * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA + * will not write to this resource. + * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that + * CUDA will not read from this resource and will write over the + * entire contents of the resource, so none of the data previously + * stored in the resource will be preserved. + * + * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer. + * typedef void* EGLImageKHR + * + * \param pCudaResource - Pointer to the returned object handle + * \param image - An EGLImageKHR image which can be used to create target resource. 
+ * \param flags - Map flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaGraphicsUnregisterResource, + * ::cudaGraphicsResourceGetMappedEglFrame, + * ::cuGraphicsEGLRegisterImage + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsEGLRegisterImage(struct cudaGraphicsResource **pCudaResource, EGLImageKHR image, unsigned int flags); + +/** + * \brief Connect CUDA to EGLStream as a consumer. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param eglStream - EGLStreamKHR handle + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerConnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnect(cudaEglStreamConnection *conn, EGLStreamKHR eglStream); + +/** + * \brief Connect CUDA to EGLStream as a consumer with given flags. + * + * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by + * ::cudaEglResourceLocationFlags. + * + * The flags specify whether the consumer wants to access frames from system memory or video memory. + * Default is ::cudaEglResourceLocationVidmem. + * + * \param conn - Pointer to the returned connection handle + * \param eglStream - EGLStreamKHR handle + * \param flags - Flags denote intended location - system or video. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerConnectWithFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection *conn, EGLStreamKHR eglStream, unsigned int flags); + +/** + * \brief Disconnect CUDA as a consumer to EGLStream . + * + * Disconnect CUDA as a consumer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerConnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerDisconnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection *conn); + +/** + * \brief Acquire an image frame from the EGLStream with CUDA as a consumer. + * + * Acquire an image frame from EGLStreamKHR. + * ::cudaGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get + * ::cudaEglFrame. + * + * \param conn - Connection on which to acquire + * \param pCudaResource - CUDA resource on which the EGLStream frame will be mapped for use. + * \param pStream - CUDA stream for synchronization and any data migrations + * implied by ::cudaEglResourceLocationFlags. + * \param timeout - Desired timeout in usec. 
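+ *
+ * As an illustrative, non-normative sketch of a consumer loop (assuming
+ * \p eglStream is an already created EGLStreamKHR and omitting error checks):
+ * \code
+ * cudaEglStreamConnection conn;
+ * cudaGraphicsResource_t resource;
+ * cudaEglFrame eglFrame;
+ * cudaStream_t stream;
+ * cudaStreamCreate(&stream);
+ * cudaEGLStreamConsumerConnect(&conn, eglStream);
+ * for (;;) {
+ *     if (cudaEGLStreamConsumerAcquireFrame(&conn, &resource, &stream, 16000) != cudaSuccess) {
+ *         break; // or retry, e.g. on timeout
+ *     }
+ *     cudaGraphicsResourceGetMappedEglFrame(&eglFrame, resource, 0, 0);
+ *     // ... process the planes described by eglFrame on "stream" ...
+ *     cudaEGLStreamConsumerReleaseFrame(&conn, resource, &stream);
+ * }
+ * cudaEGLStreamConsumerDisconnect(&conn);
+ * \endcode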
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * ::cudaErrorLaunchTimeout + * + * \sa + * ::cudaEGLStreamConsumerConnect, + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerReleaseFrame, + * ::cuEGLStreamConsumerAcquireFrame + */ + +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection *conn, + cudaGraphicsResource_t *pCudaResource, cudaStream_t *pStream, unsigned int timeout); +/** + * \brief Releases the last frame acquired from the EGLStream. + * + * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR. + * + * \param conn - Connection on which to release + * \param pCudaResource - CUDA resource whose corresponding frame is to be released + * \param pStream - CUDA stream on which release will be done. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamConsumerConnect, + * ::cudaEGLStreamConsumerDisconnect, + * ::cudaEGLStreamConsumerAcquireFrame, + * ::cuEGLStreamConsumerReleaseFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection *conn, + cudaGraphicsResource_t pCudaResource, cudaStream_t *pStream); + +/** + * \brief Connect CUDA to EGLStream as a producer. + * + * Connect CUDA as a producer to EGLStreamKHR specified by \p stream. + * + * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + * API to another. + * + * \param conn - Pointer to the returned connection handle + * \param eglStream - EGLStreamKHR handle + * \param width - width of the image to be submitted to the stream + * \param height - height of the image to be submitted to the stream + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerDisconnect, + * ::cudaEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerReturnFrame, + * ::cuEGLStreamProducerConnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerConnect(cudaEglStreamConnection *conn, + EGLStreamKHR eglStream, EGLint width, EGLint height); + +/** + * \brief Disconnect CUDA as a producer to EGLStream . + * + * Disconnect CUDA as a producer to EGLStreamKHR. + * + * \param conn - Conection to disconnect. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerConnect, + * ::cudaEGLStreamProducerPresentFrame, + * ::cudaEGLStreamProducerReturnFrame, + * ::cuEGLStreamProducerDisconnect + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerDisconnect(cudaEglStreamConnection *conn); + +/** + * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer. + * + * The ::cudaEglFrame is defined as: + * \code + * typedef struct cudaEglFrame_st { + * union { + * cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; + * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; + * } frame; + * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; + * unsigned int planeCount; + * cudaEglFrameType frameType; + * cudaEglColorFormat eglColorFormat; + * } cudaEglFrame; + * \endcode + * + * For ::cudaEglFrame of type ::cudaEglFrameTypePitch, the application may present sub-region of a memory + * allocation. In that case, ::cudaPitchedPtr::ptr will specify the start address of the sub-region in + * the allocation and ::cudaEglPlaneDesc will specify the dimensions of the sub-region. 
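+ *
+ * For illustration only (not part of the original reference), a producer might fill a
+ * single-plane pitched ::cudaEglFrame as follows; \p conn is assumed to be an already
+ * established producer connection and the color format is an arbitrary example:
+ * \code
+ * unsigned int width = 1920, height = 1080;
+ * void *devPtr; size_t pitch;
+ * cudaMallocPitch(&devPtr, &pitch, (size_t)width * 4, height);
+ *
+ * cudaEglFrame frame;
+ * memset(&frame, 0, sizeof(frame));
+ * frame.frameType       = cudaEglFrameTypePitch;
+ * frame.eglColorFormat  = cudaEglColorFormatYUVA;  // example format only
+ * frame.planeCount      = 1;
+ * frame.frame.pPitch[0] = make_cudaPitchedPtr(devPtr, pitch, (size_t)width * 4, height);
+ * frame.planeDesc[0].width       = width;
+ * frame.planeDesc[0].height      = height;
+ * frame.planeDesc[0].depth       = 1;
+ * frame.planeDesc[0].pitch       = (unsigned int)pitch;
+ * frame.planeDesc[0].numChannels = 4;
+ * frame.planeDesc[0].channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
+ *
+ * cudaStream_t stream;
+ * cudaStreamCreate(&stream);
+ * cudaEGLStreamProducerPresentFrame(&conn, frame, &stream);
+ * // Once the consumer releases it, take the frame back for reuse:
+ * cudaEglFrame returned;
+ * cudaEGLStreamProducerReturnFrame(&conn, &returned, &stream);
+ * \endcode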
+ * + * \param conn - Connection on which to present the CUDA array + * \param eglframe - CUDA Eglstream Proucer Frame handle to be sent to the consumer over EglStream. + * \param pStream - CUDA stream on which to present the frame. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerConnect, + * ::cudaEGLStreamProducerDisconnect, + * ::cudaEGLStreamProducerReturnFrame, + * ::cuEGLStreamProducerPresentFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection *conn, + cudaEglFrame eglframe, cudaStream_t *pStream); + +/** + * \brief Return the CUDA eglFrame to the EGLStream last released by the consumer. + * + * This API can potentially return cudaErrorLaunchTimeout if the consumer has not + * returned a frame to EGL stream. If timeout is returned the application can retry. + * + * \param conn - Connection on which to present the CUDA array + * \param eglframe - CUDA Eglstream Proucer Frame handle returned from the consumer over EglStream. + * \param pStream - CUDA stream on which to return the frame. + * + * \return + * ::cudaSuccess, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \sa + * ::cudaEGLStreamProducerConnect, + * ::cudaEGLStreamProducerDisconnect, + * ::cudaEGLStreamProducerPresentFrame, + * ::cuEGLStreamProducerReturnFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection *conn, + cudaEglFrame *eglframe, cudaStream_t *pStream); + +/** + * \brief Get an eglFrame through which to access a registered EGL graphics resource. + * + * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource + * \p resource may be accessed. + * This API can only be called for EGL graphics resources. + * + * The ::cudaEglFrame is defined as + * \code + * typedef struct cudaEglFrame_st { + * union { + * cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; + * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; + * } frame; + * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; + * unsigned int planeCount; + * cudaEglFrameType frameType; + * cudaEglColorFormat eglColorFormat; + * } cudaEglFrame; + * \endcode + * + * + * \param eglFrame - Returned eglFrame. + * \param resource - Registered resource to access. + * \param index - Index for cubemap surfaces. + * \param mipLevel - Mipmap level for the subresource to access. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown + * + * \note Note that in case of multiplanar \p *eglFrame, pitch of only first plane (unsigned int cudaEglPlaneDesc::pitch) is to be considered by the application. + * + * \sa + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsResourceGetMappedEglFrame + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame, + cudaGraphicsResource_t resource, unsigned int index, unsigned int mipLevel); + +/** + * \brief Creates an event from EGLSync object + * + * Creates an event *phEvent from an EGLSyncKHR eglSync with the flages specified + * via \p flags. Valid flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that the created event should use blocking + * synchronization. 
A CPU thread that uses ::cudaEventSynchronize() to wait on + * an event created with this flag will block until the event has actually + * been completed. + * + * ::cudaEventRecord and TimingData are not supported for events created from EGLSync. + * + * The EGLSyncKHR is an opaque handle to an EGL sync object. + * typedef void* EGLSyncKHR + * + * \param phEvent - Returns newly created event + * \param eglSync - Opaque handle to EGLSync object + * \param flags - Event creation flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInitializationError, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * + * \sa + * ::cudaEventQuery, + * ::cudaEventSynchronize, + * ::cudaEventDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaEventCreateFromEGLSync(cudaEvent_t *phEvent, EGLSyncKHR eglSync, unsigned int flags); + +/** @} */ /* END CUDART_EGL */ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* __CUDA_EGL_INTEROP_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h new file mode 100644 index 0000000000000000000000000000000000000000..01981ed8108a18ad872f65d8914cab7638af596f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h @@ -0,0 +1,4023 @@ +/* +* Copyright 1993-2021 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 
227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. +* +* Any use of the Licensed Deliverables in individual and commercial +* software must include, in the user documentation and internal +* comments to the code, the above Disclaimer and U.S. Government End +* Users Notice. +*/ + +/** +* \defgroup CUDA_MATH_INTRINSIC_HALF Half Precision Intrinsics +* This section describes half precision intrinsic functions that are +* only supported in device code. +* To use these functions, include the header file \p cuda_fp16.h in your program. +* The following macros are available to help users selectively enable/disable +* various definitions present in the header file: +* - \p CUDA_NO_HALF - If defined, this macro will prevent the definition of +* additional type aliases in the global namespace, helping to avoid potential +* conflicts with symbols defined in the user program. +* - \p __CUDA_NO_HALF_CONVERSIONS__ - If defined, this macro will prevent the +* use of the C++ type conversions (converting constructors and conversion +* operators) that are common for built-in floating-point types, but may be +* undesirable for \p half which is essentially a user-defined type. +* - \p __CUDA_NO_HALF_OPERATORS__ and \p __CUDA_NO_HALF2_OPERATORS__ - If +* defined, these macros will prevent the inadvertent use of usual arithmetic +* and comparison operators. This enforces the storage-only type semantics and +* prevents C++ style computations on \p half and \p half2 types. +*/ + +/** +* \defgroup CUDA_MATH__HALF_ARITHMETIC Half Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_ARITHMETIC Half2 Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_COMPARISON Half Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_COMPARISON Half2 Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_MISC Half Precision Conversion and Data Movement +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_FUNCTIONS Half Math Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_FUNCTIONS Half2 Math Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. 
+*/ + +#ifndef __CUDA_FP16_H__ +#define __CUDA_FP16_H__ + +#define ___CUDA_FP16_STRINGIFY_INNERMOST(x) #x +#define __CUDA_FP16_STRINGIFY(x) ___CUDA_FP16_STRINGIFY_INNERMOST(x) + +#if defined(__cplusplus) +#if defined(__CUDACC__) +#define __CUDA_FP16_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__ +#else +#define __CUDA_HOSTDEVICE_FP16_DECL__ static +#endif /* defined(__CUDACC__) */ + +#define __CUDA_FP16_TYPES_EXIST__ + +/* Forward-declaration of structures defined in "cuda_fp16.hpp" */ + +/** + * \brief half datatype + * + * \details This structure implements the datatype for storing + * half-precision floating-point numbers. The structure implements + * assignment operators and type conversions. + * 16 bits are being used in total: 1 sign bit, 5 bits for the exponent, + * and the significand is being stored in 10 bits. + * The total precision is 11 bits. There are 15361 representable + * numbers within the interval [0.0, 1.0], endpoints included. + * On average we have log10(2**11) ~ 3.311 decimal digits. + * + * \internal + * \req IEEE 754-2008 compliant implementation of half-precision + * floating-point numbers. + * \endinternal + */ +struct __half; + +/** + * \brief half2 datatype + * + * \details This structure implements the datatype for storing two + * half-precision floating-point numbers. + * The structure implements assignment operators and type conversions. + * + * \internal + * \req Vectorified version of half. + * \endinternal + */ +struct __half2; + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts double number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts double number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - double. Is only being read. +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-towards-zero mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-towards-zero mode. +* \param[in] a - float. Is only being read. +* \returns half +* - \p a converted to half. 
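+* For illustration (not part of the original documentation), the four rounding
+* variants can be contrasted on a value that is not exactly representable in
+* half precision:
+* \code
+* float x = 0.1f;
+* __half rn = __float2half_rn(x); // round to nearest even
+* __half rz = __float2half_rz(x); // round towards zero
+* __half rd = __float2half_rd(x); // round towards negative infinity
+* __half ru = __float2half_ru(x); // round towards positive infinity
+* float back = __half2float(rn);  // convert back for inspection
+* \endcode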
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-down mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-down mode. +* \param[in] a - float. Is only being read. +* +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-up mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-up mode. +* \param[in] a - float. Is only being read. +* +* \returns half +* - \p a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts \p half number to float. +* +* \details Converts half number \p a to float. +* \param[in] a - float. Is only being read. +* +* \returns float +* - \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts input to half precision in round-to-nearest-even mode and +* populates both halves of \p half2 with converted value. +* +* \details Converts input \p a to half precision in round-to-nearest-even mode and +* populates both halves of \p half2 with converted value. +* \param[in] a - float. Is only being read. +* +* \returns half2 +* - The \p half2 value with both halves equal to the converted half +* precision number. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both input floats to half precision in round-to-nearest-even +* mode and returns \p half2 with converted values. +* +* \details Converts both input floats to half precision in round-to-nearest-even mode +* and combines the results into one \p half2 number. Low 16 bits of the return +* value correspond to the input \p a, high 16 bits correspond to the input \p +* b. +* \param[in] a - float. Is only being read. +* \param[in] b - float. Is only being read. +* +* \returns half2 +* - The \p half2 value with corresponding halves equal to the +* converted input floats. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts low 16 bits of \p half2 to float and returns the result +* +* \details Converts low 16 bits of \p half2 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns float +* - The low 16 bits of \p a converted to float. 
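+* A minimal illustrative sketch of packing two floats and unpacking them again:
+* \code
+* __half2 p  = __floats2half2_rn(1.5f, -2.25f);
+* float   lo = __low2float(p);  // 1.5f, taken from the low 16 bits
+* float   hi = __high2float(p); // -2.25f, taken from the high 16 bits
+* \endcode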
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts high 16 bits of \p half2 to float and returns the result +* +* \details Converts high 16 bits of \p half2 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns float +* - The high 16 bits of \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-towards-zero +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-towards-zero mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-towards-zero mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. 
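+* For illustration only, the round-towards-zero conversions and their NaN behavior
+* (as documented above) look like this:
+* \code
+* __half h  = __float2half(-3.75f);
+* int       i  = __half2int_rz(h);    // -3, truncated towards zero
+* long long ll = __half2ll_rz(h);     // -3
+* __half hn = __float2half(nanf("")); // NaN input
+* int       in  = __half2int_rz(hn);  // 0
+* long long lln = __half2ll_rz(hn);   // 0x8000000000000000
+* \endcode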
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-towards-zero +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-towards-zero mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h); + +#if defined(__CUDACC__) +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both components of float2 number to half precision in +* round-to-nearest-even mode and returns \p half2 with converted values. +* +* \details Converts both components of float2 to half precision in round-to-nearest +* mode and combines the results into one \p half2 number. Low 16 bits of the +* return value correspond to \p a.x and high 16 bits of the return value +* correspond to \p a.y. +* \param[in] a - float2. Is only being read. +* +* \returns half2 +* - The \p half2 which has corresponding halves equal to the +* converted float2 components. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both halves of \p half2 to float2 and returns the result. +* +* \details Converts both halves of \p half2 input \p a to float2 and returns the +* result. +* \param[in] a - half2. Is only being read. +* +* \returns float2 +* - \p a converted to float2. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-to-nearest-even mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns int +* - \p h converted to a signed integer. 
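+* Since this variant is declared with __CUDA_FP16_DECL__ it is available in device
+* code only; an illustrative (hypothetical) kernel using it:
+* \code
+* __global__ void half_to_int_ru(const __half *in, int *out, int n)
+* {
+*     int i = blockIdx.x * blockDim.x + threadIdx.x;
+*     if (i < n) {
+*         out[i] = __half2int_ru(in[i]); // convert, rounding up
+*     }
+* }
+* \endcode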
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-to-nearest-even mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-towards-zero mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_rz(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-down mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_rd(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-up mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_ru(const int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. 
+* +* \returns short int +* - \p h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-towards-zero mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_rz(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-down mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_rd(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-up mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_ru(const short int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-to-nearest-even mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-up mode. 
+* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* - \p h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-to-nearest-even mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-towards-zero mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-down mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-up mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-to-nearest-even mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-down mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. 
+*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-up mode. NaN inputs are converted to 0. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - \p h converted to an unsigned short integer. +*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-towards-zero +* mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-down mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-up mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-to-nearest-even mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-down mode. 
+* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-down mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-up mode. NaN inputs return 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* - \p h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-towards-zero +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-down mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-up mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-to-nearest-even mode. 
NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-down mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-up mode. NaN inputs return a long long int with hex value of 0x8000000000000000. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* - \p h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-towards-zero mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +*/ +__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-down mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-up mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* - \p i converted to half. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i); + +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Truncate input argument to the integral part. +* +* \details Round \p h to the nearest integer value that does not exceed \p h in +* magnitude. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The truncated integer value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half htrunc(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculate ceiling of the input argument. +* +* \details Compute the smallest integer value not less than \p h. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The smallest integer value not less than \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hceil(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculate the largest integer less than or equal to \p h. +* +* \details Calculate the largest integer value which is less than or equal to \p h. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The largest integer value which is less than or equal to \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hfloor(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Round input to nearest integer value in half-precision floating-point +* number. +* +* \details Round \p h to the nearest integer value in half-precision floating-point +* format, with halfway cases rounded to the nearest even integer value. +* \param[in] h - half. Is only being read. +* +* \returns half +* - The nearest integer to \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrint(const __half h); + +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Truncate \p half2 vector input argument to the integral part. +* +* \details Round each component of vector \p h to the nearest integer value that does +* not exceed \p h in magnitude. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The truncated \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculate \p half2 vector ceiling of the input argument. +* +* \details For each component of vector \p h compute the smallest integer value not less +* than \p h. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The vector of smallest integers not less than \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculate the largest integer less than or equal to \p h. +* +* \details For each component of vector \p h calculate the largest integer value which +* is less than or equal to \p h. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The vector of largest integers which is less than or equal to \p h. 
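+* An illustrative device-code sketch applying the \p half2 rounding functions to one
+* packed pair of values:
+* \code
+* __half2 v  = __floats2half2_rn(1.5f, -2.5f);
+* __half2 tr = h2trunc(v); // ( 1.0, -2.0)
+* __half2 ce = h2ceil(v);  // ( 2.0, -2.0)
+* __half2 fl = h2floor(v); // ( 1.0, -3.0)
+* __half2 rn = h2rint(v);  // ( 2.0, -2.0), halfway cases round to even
+* \endcode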
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Round input to nearest integer value in half-precision floating-point +* number. +* +* \details Round each component of \p half2 vector \p h to the nearest integer value in +* half-precision floating-point format, with halfway cases rounded to the +* nearest even integer value. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* - The vector of rounded integer values. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns \p half2 with both halves equal to the input value. +* +* \details Returns \p half2 number with both halves equal to the input \p a \p half +* number. +* \param[in] a - half. Is only being read. +* +* \returns half2 +* - The vector which has both its halves equal to the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __half2half2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Swaps both halves of the \p half2 input. +* +* \details Swaps both halves of the \p half2 input and returns a new \p half2 number +* with swapped halves. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - \p a with its halves being swapped. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts low 16 bits from each of the two \p half2 inputs and combines +* into one \p half2 number. +* +* \details Extracts low 16 bits from each of the two \p half2 inputs and combines into +* one \p half2 number. Low 16 bits from input \p a is stored in low 16 bits of +* the return value, low 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The low 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts high 16 bits from each of the two \p half2 inputs and +* combines into one \p half2 number. +* +* \details Extracts high 16 bits from each of the two \p half2 inputs and combines into +* one \p half2 number. High 16 bits from input \p a is stored in low 16 bits of +* the return value, high 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The high 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns high 16 bits of \p half2 input. +* +* \details Returns high 16 bits of \p half2 input \p a. +* \param[in] a - half2. Is only being read. +* +* \returns half +* - The high 16 bits of the input. 
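+* A short illustrative sketch (device code) of assembling and taking apart a \p half2:
+* \code
+* __half  a  = __float2half(1.0f);
+* __half  b  = __float2half(2.0f);
+* __half2 ab = __halves2half2(a, b);  // low half = a, high half = b
+* __half  hi = __high2half(ab);       // b
+* __half  lo = __low2half(ab);        // a
+* __half2 ba = __lowhigh2highlow(ab); // halves swapped
+* \endcode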
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __high2half(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns low 16 bits of \p half2 input. +* +* \details Returns low 16 bits of \p half2 input \p a. +* \param[in] a - half2. Is only being read. +* +* \returns half +* - Returns \p half which contains low 16 bits of the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __low2half(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Checks if the input \p half number is infinite. +* +* \details Checks if the input \p half number \p a is infinite. +* \param[in] a - half. Is only being read. +* +* \returns int +* - -1 iff \p a is equal to negative infinity, +* - 1 iff \p a is equal to positive infinity, +* - 0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __hisinf(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Combines two \p half numbers into one \p half2 number. +* +* \details Combines two input \p half number \p a and \p b into one \p half2 number. +* Input \p a is stored in low 16 bits of the return value, input \p b is stored +* in high 16 bits of the return value. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half2 +* - The half2 with one half equal to \p a and the other to \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts low 16 bits from \p half2 input. +* +* \details Extracts low 16 bits from \p half2 input \p a and returns a new \p half2 +* number which has both halves equal to the extracted bits. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The half2 with both halves equal to the low 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts high 16 bits from \p half2 input. +* +* \details Extracts high 16 bits from \p half2 input \p a and returns a new \p half2 +* number which has both halves equal to the extracted bits. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The half2 with both halves equal to the high 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a \p half as a signed short integer. +* +* \details Reinterprets the bits in the half-precision floating-point number \p h +* as a signed short integer. +* \param[in] h - half. Is only being read. +* +* \returns short int +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half_as_short(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a \p half as an unsigned short integer. 
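+*
+* Illustrative sketch (device code assumed); the raw IEEE 754 binary16
+* encoding can be inspected this way:
+* \code
+* unsigned short bits = __half_as_ushort(__float2half(1.0f)); // 0x3C00
+* \endcode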
+* +* \details Reinterprets the bits in the half-precision floating-point \p h +* as an unsigned short number. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a signed short integer as a \p half. +* +* \details Reinterprets the bits in the signed short integer \p i as a +* half-precision floating-point number. +* \param[in] i - short int. Is only being read. +* +* \returns half +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short_as_half(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in an unsigned short integer as a \p half. +* +* \details Reinterprets the bits in the unsigned short integer \p i as a +* half-precision floating-point number. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* - The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half maximum of two input values. +* +* \details Calculates \p half max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half minimum of two input values. +* +* \details Calculates \p half min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector maximum of two inputs. +* +* \details Calculates \p half2 vector max(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
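+*
+* Illustrative sketch (device code assumed):
+* \code
+* __half2 a = __floats2half2_rn(1.0f, -2.0f);
+* __half2 b = __floats2half2_rn(0.5f,  2.5f);
+* __half2 m = __hmax2(a, b); // (1.0, 2.5), the elementwise maximum
+* \endcode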
+* +* \returns half2 +* - The result of elementwise maximum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector minimum of two inputs. +* +* \details Calculates \p half2 vector min(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise minimum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b); + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +#define __WSB_DEPRECATION_MESSAGE(x) __CUDA_FP16_STRINGIFY(x) "() is deprecated in favor of " __CUDA_FP16_STRINGIFY(x) "_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." + +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) __half2 __shfl(const __half2 var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down))__half2 __shfl_down(const __half2 var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) __half2 __shfl_xor(const __half2 var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) __half __shfl(const __half var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) __half __shfl_up(const __half var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) __half __shfl_down(const __half var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) __half __shfl_xor(const __half var, const int delta, const int width = warpSize); +#endif + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. 
If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. 
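+*
+* A simplified warp-reduction sketch (assumes all 32 lanes of the warp are
+* active and participate with a full mask):
+* \code
+* __half2 v = __float2half2_rn(1.0f);      // per-lane packed pair (1.0, 1.0)
+* for (int offset = warpSize / 2; offset > 0; offset /= 2) {
+*     v = __hadd2(v, __shfl_down_sync(0xFFFFFFFFu, v, offset));
+* }
+* // lane 0 now holds the packed warp-wide sums, (32.0, 32.0)
+* \endcode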
+* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. 
The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. 
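+*
+* A simplified butterfly-reduction sketch (assumes all 32 lanes of the warp
+* are active and use a full mask):
+* \code
+* __half v = __float2half(1.0f);
+* for (int laneMask = warpSize / 2; laneMask > 0; laneMask /= 2) {
+*     v = __hadd(v, __shfl_xor_sync(0xFFFFFFFFu, v, laneMask));
+* }
+* // every lane now holds the warp-wide sum, 32.0
+* \endcode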
+* \note_ref_guide_warp_shuffle +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width = warpSize); + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif +#endif /*!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) */ + +#if defined(__cplusplus) && ( !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) ) +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wb` store instruction. 
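+*
+* A minimal sketch combining the cached-load and store intrinsics in this
+* group (assumes valid, suitably sized global-memory buffers):
+* \code
+* __global__ void copyReadOnly(const __half2 *__restrict__ in, __half2 *out)
+* {
+*     __half2 v = __ldg(in + threadIdx.x); // read-only, non-coherent load
+*     __stwb(out + threadIdx.x, v);        // write-back store
+* }
+* \endcode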
+* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wb` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value); +#endif /*defined(__cplusplus) && ( !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) )*/ + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs half2 vector if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison. 
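+*
+* Illustrative sketch of the 1.0/0.0 per-lane result convention (device code
+* on a supported architecture assumed):
+* \code
+* __half2 a = __floats2half2_rn(1.0f, 3.0f);
+* __half2 b = __floats2half2_rn(1.0f, 2.0f);
+* __half2 r = __hle2(a, b); // (1.0, 0.0)
+* \endcode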
+* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The \p half2 result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The half2 vector result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. 
+* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison. +* +* Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The \p half2 vector result of unordered greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The vector result of unordered less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The \p half2 vector result of unordered greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs half2 vector if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. 
+* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __heq2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hne2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hle2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hge2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hlt2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. 
+* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hgt2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hequ2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hneu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison. +* +* Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hleu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hgeu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison. 
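+*
+* Illustrative sketch of the 0xFFFF per-lane mask convention (device code
+* assumed; the sketch also assumes the low lane maps to the low 16 bits of
+* the returned mask):
+* \code
+* __half2 a = __floats2half2_rn(1.0f, 5.0f);
+* __half2 b = __floats2half2_rn(2.0f, 4.0f);
+* unsigned m = __hltu2_mask(a, b); // 0x0000FFFFu, true only in the low lane
+* \endcode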
+* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hltu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p unsigned bits are set to 0xFFFF for true, or 0x0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns unsigned int +* - The vector mask result of unordered greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned __hgtu2_mask(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Determine whether \p half2 argument is a NaN. +* +* \details Determine whether each half of input \p half2 number \p a is a NaN. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The half2 with the corresponding \p half results set to +* 1.0 for NaN, 0.0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The sum of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-104 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The subtraction of vector \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-102 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
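+*
+* Illustrative sketch (device code assumed):
+* \code
+* __half2 a = __floats2half2_rn(2.0f, 3.0f);
+* __half2 b = __floats2half2_rn(4.0f, 0.5f);
+* __half2 p = __hmul2(a, b); // (8.0, 1.5), each lane rounded to nearest even
+* \endcode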
+* +* \returns half2 +* - The result of elementwise multiplying the vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode. Prevents floating-point contractions of mul+add into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The sum of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2_rn(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode. Prevents floating-point contractions of mul+sub +* into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-104 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The subtraction of vector \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2_rn(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode. Prevents floating-point contractions of +* mul+add or sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-102 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise multiplying the vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2_rn(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector division in round-to-nearest-even mode. +* +* \details Divides \p half2 input vector \p a by input vector \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-103 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The elementwise division of \p a with \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Calculates the absolute value of both halves of the input \p half2 number and +* returns the result. +* +* \details Calculates the absolute value of both halves of the input \p half2 number and +* returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - Returns \p a with the absolute value of both halves. 
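+*
+* Illustrative sketch (device code assumed):
+* \code
+* __half2 x = __floats2half2_rn(-1.5f, 2.0f);
+* __half2 y = __habs2(x); // (1.5, 2.0)
+* \endcode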
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the results to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The subtraction of vector \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise multiplication of vectors \p a and \p b, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode. +* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-105 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode, with saturation to [0.0, 1.0]. 
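+*
+* Illustrative sketch (device code assumed):
+* \code
+* __half2 a = __floats2half2_rn(0.5f,  1.0f);
+* __half2 b = __floats2half2_rn(0.5f,  1.0f);
+* __half2 c = __floats2half2_rn(0.25f, 0.5f);
+* __half2 r = __hfma2_sat(a, b, c); // (0.5, 1.0): a*b+c clamped to [0.0, 1.0]
+* \endcode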
+* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the +* results to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Negates both halves of the input \p half2 number and returns the +* result. +* +* \details Negates both halves of the input \p half2 number \p a and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-101 +* \endinternal +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - Returns \p a with both halves negated. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Calculates the absolute value of input \p half number and returns the result. +* +* \details Calculates the absolute value of input \p half number and returns the result. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The absolute value of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __habs(const __half a); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode. +* +* \details Performs \p half addition of inputs \p a and \p b, in round-to-nearest-even +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of multiplying \p a and \p b. 
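+*
+* Illustrative sketch (device code assumed):
+* \code
+* __half x = __float2half(0.5f);
+* __half y = __float2half(8.0f);
+* __half p = __hmul(x, y); // 4.0
+* \endcode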
+*/ +__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode. +* +* \details Performs \p half addition of inputs \p a and \p b, in round-to-nearest-even +* mode. Prevents floating-point contractions of mul+add into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd_rn(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode. Prevents floating-point contractions of mul+sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub_rn(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode. Prevents floating-point contractions of mul+add or sub into fma. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of multiplying \p a and \p b. +*/ +__CUDA_FP16_DECL__ __half __hmul_rn(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half division in round-to-nearest-even mode. +* +* \details Divides \p half input \p a by input \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-98 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of dividing \p a by \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half add of inputs \p a and \p b, in round-to-nearest-even mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. 
+* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of subtraction of \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the result to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* - The result of multiplying \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-96 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the result +* to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Negates input \p half number and returns the result. +* +* \details Negates input \p half number and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-100 +* \endinternal +* \param[in] a - half. Is only being read. +* +* \returns half +* - minus a +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hneg(const __half a); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector if-equal comparison and returns boolean true +* iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half if-equal comparisons +* evaluate to true, or false otherwise. 
+* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of if-equal comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of not-equal comparison +* of vectors \p a and \p b are true, +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of less-equal comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of greater-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
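+*
+* Usage sketch (illustrative helper name; device code, compute capability 5.3+):
+* \code
+* __device__ bool strictly_inside(const __half2 v, const __half2 lo, const __half2 hi)
+* {
+*     // true only when lo < v and v < hi hold in both halves
+*     return __hblt2(lo, v) && __hblt2(v, hi);
+* }
+* \endcode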
+* +* \returns bool +* - true if both \p half results of less-than comparison +* of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of greater-than +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half if-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered if-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered not-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
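+*
+* Usage sketch contrasting the ordered and unordered forms (illustrative only):
+* \code
+* __device__ void le_compare_demo(const __half2 a, const __half2 b, bool out[2])
+* {
+*     out[0] = __hble2(a, b);  // ordered: a NaN in any lane forces false
+*     out[1] = __hbleu2(a, b); // unordered: a NaN lane counts as satisfying <=
+* }
+* \endcode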
+* +* \returns bool +* - true if both \p half results of unordered less-equal +* comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison and +* returns boolean true iff both \p half results are true, boolean false +* otherwise. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered +* greater-equal comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered less-than comparison of +* vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison and +* returns boolean true iff both \p half results are true, boolean false +* otherwise. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* - true if both \p half results of unordered +* greater-than comparison of vectors \p a and \p b are true; +* - false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half if-equal comparison. +* +* \details Performs \p half if-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of if-equal comparison of \p a and \p b. 
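+*
+* Usage sketch (illustrative; shows the effect of the ordered NaN rule):
+* \code
+* __device__ bool equal_or_both_nan(const __half a, const __half b)
+* {
+*     // __heq is ordered, so it returns false whenever either input is NaN;
+*     // two NaNs are treated as "equal" here only via the explicit check
+*     return __heq(a, b) || (__hisnan(a) && __hisnan(b));
+* }
+* \endcode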
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half not-equal comparison. +* +* \details Performs \p half not-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of not-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half less-equal comparison. +* +* \details Performs \p half less-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of less-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half greater-equal comparison. +* +* \details Performs \p half greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of greater-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half less-than comparison. +* +* \details Performs \p half less-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of less-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half greater-than comparison. +* +* \details Performs \p half greater-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of greater-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered if-equal comparison. +* +* \details Performs \p half if-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered if-equal comparison of \p a and +* \p b. 
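+*
+* Usage sketch (illustrative helper name):
+* \code
+* __device__ bool maybe_equal(const __half a, const __half b)
+* {
+*     // unordered equality: true when a == b or when either input is NaN,
+*     // i.e. the logical negation of the ordered __hne(a, b)
+*     return __hequ(a, b);
+* }
+* \endcode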
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered not-equal comparison. +* +* \details Performs \p half not-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered not-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered less-equal comparison. +* +* \details Performs \p half less-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered less-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered greater-equal comparison. +* +* \details Performs \p half greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered greater-equal comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered less-than comparison. +* +* \details Performs \p half less-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered less-than comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered greater-than comparison. +* +* \details Performs \p half greater-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* - The boolean result of unordered greater-than comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Determine whether \p half argument is a NaN. +* +* \details Determine whether \p half value \p a is a NaN. +* \param[in] a - half. Is only being read. +* +* \returns bool +* - true iff argument is NaN. 
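+*
+* Usage sketch (illustrative helper name):
+* \code
+* __device__ __half nan_to_value(const __half x, const __half fallback)
+* {
+*     // replace a NaN with a well-defined value
+*     return __hisnan(x) ? fallback : x;
+* }
+* \endcode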
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hisnan(const __half a); +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half maximum of two input values, NaNs pass through. +* +* \details Calculates \p half max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half minimum of two input values, NaNs pass through. +* +* \details Calculates \p half min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode with relu saturation. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* - The result of fused multiply-add operation on \p +* a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector maximum of two inputs, NaNs pass through. +* +* \details Calculates \p half2 vector max(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise maximum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector minimum of two inputs, NaNs pass through. +* +* \details Calculates \p half2 vector min(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. 
Is only being read. +* +* \returns half2 +* - The result of elementwise minimum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode with relu saturation. +* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c); +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) */ +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs fast complex multiply-accumulate +* +* \details Interprets vector \p half2 input pairs \p a, \p b, and \p c as +* complex numbers in \p half precision and performs +* complex multiply-accumulate operation: a*b + c +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* - The result of complex multiply-accumulate operation on complex numbers \p a, \p b, and \p c +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half square root in round-to-nearest-even mode. +* +* \details Calculates \p half square root of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hsqrt(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half reciprocal square root in round-to-nearest-even +* mode. +* +* \details Calculates \p half reciprocal square root of input \p a in round-to-nearest +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The reciprocal square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrsqrt(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p half reciprocal of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The reciprocal of \p a. 
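+*
+* Usage sketch (illustrative helper name):
+* \code
+* __device__ __half fast_div(const __half a, const __half b)
+* {
+*     // a / b via the reciprocal; __hdiv(a, b) is the directly rounded alternative
+*     return __hmul(a, hrcp(b));
+* }
+* \endcode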
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrcp(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half natural logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half natural logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The natural logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half binary logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half binary logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The binary logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half decimal logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half decimal logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The decimal logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog10(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half natural exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half natural exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The natural exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half binary exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half binary exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The binary exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half decimal exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half decimal exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The decimal exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp10(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half cosine in round-to-nearest-even mode. +* +* \details Calculates \p half cosine of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The cosine of \p a. 
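+*
+* Usage sketch (illustrative; assumes __halves2half2 from this header to pack
+* two scalars into a __half2):
+* \code
+* __device__ __half2 unit_direction(const __half angle)
+* {
+*     // (cos(angle), sin(angle)) built from the scalar half functions
+*     return __halves2half2(hcos(angle), hsin(angle));
+* }
+* \endcode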
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hcos(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half sine in round-to-nearest-even mode. +* +* \details Calculates \p half sine of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* - The sine of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hsin(const __half a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector square root in round-to-nearest-even mode. +* +* \details Calculates \p half2 square root of input vector \p a in round-to-nearest +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector reciprocal square root in round-to-nearest +* mode. +* +* \details Calculates \p half2 reciprocal square root of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise reciprocal square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p half2 reciprocal of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise reciprocal on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector natural logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 natural logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise natural logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector binary logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 binary logarithm of input vector \p a in round-to-nearest +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise binary logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector decimal logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 decimal logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise decimal logarithm on vector \p a. 
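+*
+* Usage sketch (illustrative; assumes __float2half2_rn and __hmul2 from this
+* header): converting a vector of power ratios to decibels.
+* \code
+* __device__ __half2 to_decibels(const __half2 power_ratio)
+* {
+*     const __half2 ten = __float2half2_rn(10.0F);
+*     // 10 * log10(x), evaluated elementwise on both halves
+*     return __hmul2(ten, h2log10(power_ratio));
+* }
+* \endcode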
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half2 exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector binary exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p half2 binary exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise binary exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector decimal exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p half2 decimal exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise decimal exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector cosine in round-to-nearest-even mode. +* +* \details Calculates \p half2 cosine of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise cosine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector sine in round-to-nearest-even mode. +* +* \details Calculates \p half2 sine of input vector \p a in round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* - The elementwise sine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a); + +#endif /*if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)*/ + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600) + +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Vector add \p val to the value stored at \p address in global or shared memory, and writes this +* value back to \p address. The atomicity of the add operation is guaranteed separately for each of the +* two __half elements; the entire __half2 is not guaranteed to be atomic as a single 32-bit access. +* +* \details The location of \p address must be in global or shared memory. This operation has undefined +* behavior otherwise. This operation is only supported by devices of compute capability 6.x and higher. +* +* \param[in] address - half2*. An address in global or shared memory. 
+* \param[in] val - half2. The value to be added. +* +* \returns half2 +* - The old value read from \p address. +* +* \note_ref_guide_atomic +*/ +__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val); + +#endif /*if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)*/ + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) + +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Adds \p val to the value stored at \p address in global or shared memory, and writes this value +* back to \p address. This operation is performed in one atomic operation. +* +* \details The location of \p address must be in global or shared memory. This operation has undefined +* behavior otherwise. This operation is only supported by devices of compute capability 7.x and higher. +* +* \param[in] address - half*. An address in global or shared memory. +* \param[in] val - half. The value to be added. +* +* \returns half +* - The old value read from \p address. +* +* \note_ref_guide_atomic +*/ +__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val); + +#endif /*if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)*/ + +#endif /* defined(__CUDACC__) */ + +#undef __CUDA_FP16_DECL__ +#undef __CUDA_HOSTDEVICE_FP16_DECL__ + +#endif /* defined(__cplusplus) */ + +/* Note the .hpp file is included even for host-side compilation, to capture the "half" & "half2" definitions */ +#include "cuda_fp16.hpp" +#undef ___CUDA_FP16_STRINGIFY_INNERMOST +#undef __CUDA_FP16_STRINGIFY + +#endif /* end of include guard: __CUDA_FP16_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp new file mode 100644 index 0000000000000000000000000000000000000000..22fb0c025e6086938f308a51090a239237a50c9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp @@ -0,0 +1,2738 @@ +/* +* Copyright 1993-2022 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+* OF THESE LICENSED DELIVERABLES.
+*
+* U.S. Government End Users. These Licensed Deliverables are a
+* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+* 1995), consisting of "commercial computer software" and "commercial
+* computer software documentation" as such terms are used in 48
+* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+* U.S. Government End Users acquire the Licensed Deliverables with
+* only those rights set forth herein.
+*
+* Any use of the Licensed Deliverables in individual and commercial
+* software must include, in the user documentation and internal
+* comments to the code, the above Disclaimer and U.S. Government End
+* Users Notice.
+*/
+
+#if !defined(__CUDA_FP16_HPP__)
+#define __CUDA_FP16_HPP__
+
+#if !defined(__CUDA_FP16_H__)
+#error "Do not include this file directly. Instead, include cuda_fp16.h."
+#endif
+
+#if !defined(_MSC_VER) && __cplusplus >= 201103L
+# define __CPP_VERSION_AT_LEAST_11_FP16
+#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
+# define __CPP_VERSION_AT_LEAST_11_FP16
+#endif
+
+// implicitly provided by NVRTC
+#if !defined(__CUDACC_RTC__)
+#include <nv/target>
+#endif /* !defined(__CUDACC_RTC__) */
+
+
+#if !defined(IF_DEVICE_OR_CUDACC)
+#if defined(__CUDACC__)
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, c)
+#else
+    #define IF_DEVICE_OR_CUDACC(d, c, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, d, f)
+#endif
+#endif
+/* C++11 header for std::move.
+ * In RTC mode, std::move is provided implicitly; don't include the header
+ */
+#if defined(__CPP_VERSION_AT_LEAST_11_FP16) && !defined(__CUDACC_RTC__)
+#include <utility>
+#endif /* __cplusplus >= 201103L && !defined(__CUDACC_RTC__) */
+
+/* C++ header for std::memcpy (used for type punning in host-side implementations).
+ * When compiling as a CUDA source file memcpy is provided implicitly.
+ * !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
+ */
+#if defined(__cplusplus) && !defined(__CUDACC__)
+#include <cstring>
+#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
+
+
+/* Set up function decorations */
+#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
+#define __CUDA_FP16_DECL__ static __device__ __inline__
+#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__
+#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
+#define __CUDA_HOSTDEVICE__ __host__ __device__
+#else /* !defined(__CUDACC__) */
+#if defined(__GNUC__)
+#define __CUDA_HOSTDEVICE_FP16_DECL__ static __attribute__ ((unused))
+#else
+#define __CUDA_HOSTDEVICE_FP16_DECL__ static
+#endif /* defined(__GNUC__) */
+#define __CUDA_HOSTDEVICE__
+#endif /* defined(__CUDACC_) */
+
+/* Set up structure-alignment attribute */
+#if defined(__CUDACC__)
+#define __CUDA_ALIGN__(align) __align__(align)
+#else
+/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */
+#if __cplusplus >= 201103L
+#define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */
+#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP16)*/
+#if defined(__GNUC__)
+#define __CUDA_ALIGN__(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+#define __CUDA_ALIGN__(n) __declspec(align(n))
+#else
+#define __CUDA_ALIGN__(n)
+#endif /* defined(__GNUC__) */
+#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
+#endif /* defined(__CUDACC__) */
+
+/* Macros to allow half & half2 to be used by inline assembly */
+#define __HALF_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
+#define __HALF_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
+#define __HALF2_TO_UI(var) *(reinterpret_cast<unsigned int *>(&(var)))
+#define __HALF2_TO_CUI(var) *(reinterpret_cast<const unsigned int *>(&(var)))
+
+/* Macros for half & half2 binary arithmetic */
+#define __BINARY_OP_HALF_MACRO(name) /* do */ {\
+   __half val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16 %0,%1,%2;\n}" \
+        :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b))); \
+   return val; \
+} /* while(0) */
+#define __BINARY_OP_HALF2_MACRO(name) /* do */ {\
+   __half2 val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16x2 %0,%1,%2;\n}" \
+        :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
+   return val; \
+} /* while(0) */
+#define __TERNARY_OP_HALF_MACRO(name) /* do */ {\
+   __half val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16 %0,%1,%2,%3;\n}" \
+        :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b)),"h"(__HALF_TO_CUS(c))); \
+   return val; \
+} /* while(0) */
+#define __TERNARY_OP_HALF2_MACRO(name) /* do */ {\
+   __half2 val; \
+   asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16x2 %0,%1,%2,%3;\n}" \
+        :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b)),"r"(__HALF2_TO_CUI(c))); \
+   return val; \
+} /* while(0) */
+
+/**
+* Types which allow static initialization of "half" and "half2" until
+* these become an actual builtin. Note this initialization is as a
+* bitfield representation of "half", and not a conversion from short->half.
+* Such a representation will be deprecated in a future version of CUDA.
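+* For example, a constant holding the value 1.0 can be written directly as its
+* bit pattern, with no runtime conversion (illustrative sketch; 0x3C00 is the
+* IEEE 754 binary16 encoding of 1.0):
+* \code
+* static const __half_raw half_one_raw = { 0x3C00U };
+* \endcode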
+* (Note these are visible to non-nvcc compilers, including C-only compilation) +*/ +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __half_raw; + +typedef struct __CUDA_ALIGN__(4) { + unsigned short x; + unsigned short y; +} __half2_raw; + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/* Hide GCC member initialization list warnings because of host/device in-function init requirement */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic ignored "-Weffc++" +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +/* class' : multiple assignment operators specified + The class has multiple assignment operators of a single type. This warning is informational */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( push ) +#pragma warning( disable:4522 ) +#endif /* defined(__GNUC__) */ + +struct __CUDA_ALIGN__(2) __half { +protected: + unsigned short __x; + +public: +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) + __half() = default; +#else + __CUDA_HOSTDEVICE__ __half() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + + /* Convert to/from __half_raw */ + __CUDA_HOSTDEVICE__ __half(const __half_raw &hr) : __x(hr.x) { } + __CUDA_HOSTDEVICE__ __half &operator=(const __half_raw &hr) { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __half &operator=(const __half_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __half &operator=(const volatile __half_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ operator __half_raw() const { __half_raw ret; ret.x = __x; return ret; } + __CUDA_HOSTDEVICE__ operator __half_raw() const volatile { __half_raw ret; ret.x = __x; return ret; } + +#if !defined(__CUDA_NO_HALF_CONVERSIONS__) + + /* Construct from float/double */ + __CUDA_HOSTDEVICE__ __half(const float f) { __x = __float2half(f).__x; } + __CUDA_HOSTDEVICE__ __half(const double f) { __x = __double2half(f).__x; } + + __CUDA_HOSTDEVICE__ operator float() const { return __half2float(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const float f) { __x = __float2half(f).__x; return *this; } + + /* We omit "cast to double" operator, so as to not be ambiguous about up-cast */ + __CUDA_HOSTDEVICE__ __half &operator=(const double f) { __x = __double2half(f).__x; return *this; } + +/* Member functions only available to nvcc compilation so far */ +#if defined(__CUDACC__) + /* Allow automatic construction from types supported natively in hardware */ + /* Note we do avoid constructor init-list because of special host/device compilation rules */ + __CUDA_HOSTDEVICE__ __half(const short val) { __x = __short2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned short val) { __x = __ushort2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const int val) { __x = __int2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned int val) { __x = __uint2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const long long val) { __x = __ll2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned long long val) { __x = __ull2half_rn(val).__x; } + + /* Allow automatic casts to supported builtin types, matching all that are permitted with float */ + __CUDA_HOSTDEVICE__ operator short() const { return __half2short_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const short val) { __x = 
__short2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned short() const { return __half2ushort_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned short val) { __x = __ushort2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator int() const { return __half2int_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const int val) { __x = __int2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned int() const { return __half2uint_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned int val) { __x = __uint2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator long long() const { return __half2ll_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const long long val) { __x = __ll2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned long long() const { return __half2ull_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned long long val) { __x = __ull2half_rn(val).__x; return *this; } + + /* Boolean conversion - note both 0 and -0 must return false */ + __CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFFU) != 0U; } +#endif /* defined(__CUDACC__) */ +#endif /* !defined(__CUDA_NO_HALF_CONVERSIONS__) */ +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +/* Arithmetic FP16 operations only supported on arch >= 5.3 */ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) +#if !defined(__CUDA_NO_HALF_OPERATORS__) +/* Some basic arithmetic operations expected of a builtin */ +__device__ __forceinline__ __half operator+(const __half &lh, const __half &rh) { return __hadd(lh, rh); } +__device__ __forceinline__ __half operator-(const __half &lh, const __half &rh) { return __hsub(lh, rh); } +__device__ __forceinline__ __half operator*(const __half &lh, const __half &rh) { return __hmul(lh, rh); } +__device__ __forceinline__ __half operator/(const __half &lh, const __half &rh) { return __hdiv(lh, rh); } + +__device__ __forceinline__ __half &operator+=(__half &lh, const __half &rh) { lh = __hadd(lh, rh); return lh; } +__device__ __forceinline__ __half &operator-=(__half &lh, const __half &rh) { lh = __hsub(lh, rh); return lh; } +__device__ __forceinline__ __half &operator*=(__half &lh, const __half &rh) { lh = __hmul(lh, rh); return lh; } +__device__ __forceinline__ __half &operator/=(__half &lh, const __half &rh) { lh = __hdiv(lh, rh); return lh; } + +/* Note for increment and decrement we use the raw value 0x3C00U equating to half(1.0F), to avoid the extra conversion */ +__device__ __forceinline__ __half &operator++(__half &h) { __half_raw one; one.x = 0x3C00U; h += one; return h; } +__device__ __forceinline__ __half &operator--(__half &h) { __half_raw one; one.x = 0x3C00U; h -= one; return h; } +__device__ __forceinline__ __half operator++(__half &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __half ret = h; + __half_raw one; + one.x = 0x3C00U; + h += one; + return ret; +} +__device__ __forceinline__ __half operator--(__half &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. 
+ static_cast(ignored); + + const __half ret = h; + __half_raw one; + one.x = 0x3C00U; + h -= one; + return ret; +} + +/* Unary plus and inverse operators */ +__device__ __forceinline__ __half operator+(const __half &h) { return h; } +__device__ __forceinline__ __half operator-(const __half &h) { return __hneg(h); } + +/* Some basic comparison operations to make it look like a builtin */ +__device__ __forceinline__ bool operator==(const __half &lh, const __half &rh) { return __heq(lh, rh); } +__device__ __forceinline__ bool operator!=(const __half &lh, const __half &rh) { return __hneu(lh, rh); } +__device__ __forceinline__ bool operator> (const __half &lh, const __half &rh) { return __hgt(lh, rh); } +__device__ __forceinline__ bool operator< (const __half &lh, const __half &rh) { return __hlt(lh, rh); } +__device__ __forceinline__ bool operator>=(const __half &lh, const __half &rh) { return __hge(lh, rh); } +__device__ __forceinline__ bool operator<=(const __half &lh, const __half &rh) { return __hle(lh, rh); } +#endif /* !defined(__CUDA_NO_HALF_OPERATORS__) */ +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) */ +#endif /* defined(__CUDACC__) */ + +/* __half2 is visible to non-nvcc host compilers */ +struct __CUDA_ALIGN__(4) __half2 { + __half x; + __half y; + + // All construct/copy/assign/move +public: +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) + __half2() = default; + __CUDA_HOSTDEVICE__ __half2(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); return *this; } +#else + __CUDA_HOSTDEVICE__ __half2() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + __CUDA_HOSTDEVICE__ __half2(const __half &a, const __half &b) : x(a), y(b) { } + __CUDA_HOSTDEVICE__ __half2(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); return *this; } + + /* Convert to/from __half2_raw */ + __CUDA_HOSTDEVICE__ __half2(const __half2_raw &h2r ) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2_raw &h2r) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); return *this; } + __CUDA_HOSTDEVICE__ operator __half2_raw() const { __half2_raw ret; ret.x = 0U; ret.y = 0U; __HALF2_TO_UI(ret) = __HALF2_TO_CUI(*this); return ret; } +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +/* Arithmetic FP16x2 operations only supported on arch >= 5.3 */ +#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA)) && !defined(__CUDA_NO_HALF2_OPERATORS__) + +__device__ __forceinline__ __half2 operator+(const __half2 &lh, const __half2 &rh) { return __hadd2(lh, rh); } +__device__ __forceinline__ __half2 operator-(const __half2 &lh, const __half2 &rh) { return __hsub2(lh, rh); } +__device__ __forceinline__ __half2 operator*(const __half2 &lh, const __half2 &rh) { return __hmul2(lh, rh); } +__device__ __forceinline__ __half2 operator/(const __half2 &lh, const __half2 &rh) { return __h2div(lh, rh); } + +__device__ __forceinline__ __half2& operator+=(__half2 &lh, const __half2 &rh) { lh = __hadd2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator-=(__half2 &lh, const __half2 &rh) { lh = __hsub2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator*=(__half2 &lh, const __half2 
&rh) { lh = __hmul2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator/=(__half2 &lh, const __half2 &rh) { lh = __h2div(lh, rh); return lh; } + +__device__ __forceinline__ __half2 &operator++(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return h; } +__device__ __forceinline__ __half2 &operator--(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return h; } +__device__ __forceinline__ __half2 operator++(__half2 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __half2 ret = h; + __half2_raw one; + one.x = 0x3C00U; + one.y = 0x3C00U; + h = __hadd2(h, one); + return ret; +} +__device__ __forceinline__ __half2 operator--(__half2 &h, const int ignored) +{ + // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators. + static_cast(ignored); + + const __half2 ret = h; + __half2_raw one; + one.x = 0x3C00U; + one.y = 0x3C00U; + h = __hsub2(h, one); + return ret; +} + +__device__ __forceinline__ __half2 operator+(const __half2 &h) { return h; } +__device__ __forceinline__ __half2 operator-(const __half2 &h) { return __hneg2(h); } + +__device__ __forceinline__ bool operator==(const __half2 &lh, const __half2 &rh) { return __hbeq2(lh, rh); } +__device__ __forceinline__ bool operator!=(const __half2 &lh, const __half2 &rh) { return __hbneu2(lh, rh); } +__device__ __forceinline__ bool operator>(const __half2 &lh, const __half2 &rh) { return __hbgt2(lh, rh); } +__device__ __forceinline__ bool operator<(const __half2 &lh, const __half2 &rh) { return __hblt2(lh, rh); } +__device__ __forceinline__ bool operator>=(const __half2 &lh, const __half2 &rh) { return __hbge2(lh, rh); } +__device__ __forceinline__ bool operator<=(const __half2 &lh, const __half2 &rh) { return __hble2(lh, rh); } + +#endif /* (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA)) && !defined(__CUDA_NO_HALF2_OPERATORS__) */ +#endif /* defined(__CUDACC__) */ + +/* Restore warning for multiple assignment operators */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( pop ) +#endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */ + +/* Restore -Weffc++ warnings from here on */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +#undef __CUDA_HOSTDEVICE__ +#undef __CUDA_ALIGN__ + +#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */ +static inline unsigned short __internal_float2half(const float f, unsigned int &sign, unsigned int &remainder) +{ + unsigned int x; + unsigned int u; + unsigned int result; +#if defined(__CUDACC__) + (void)memcpy(&x, &f, sizeof(f)); +#else + (void)std::memcpy(&x, &f, sizeof(f)); +#endif + u = (x & 0x7fffffffU); + sign = ((x >> 16U) & 0x8000U); + // NaN/+Inf/-Inf + if (u >= 0x7f800000U) { + remainder = 0U; + result = ((u == 0x7f800000U) ? 
(sign | 0x7c00U) : 0x7fffU); + } else if (u > 0x477fefffU) { // Overflows + remainder = 0x80000000U; + result = (sign | 0x7bffU); + } else if (u >= 0x38800000U) { // Normal numbers + remainder = u << 19U; + u -= 0x38000000U; + result = (sign | (u >> 13U)); + } else if (u < 0x33000001U) { // +0/-0 + remainder = u; + result = sign; + } else { // Denormal numbers + const unsigned int exponent = u >> 23U; + const unsigned int shift = 0x7eU - exponent; + unsigned int mantissa = (u & 0x7fffffU); + mantissa |= 0x800000U; + remainder = mantissa << (32U - shift); + result = (sign | (mantissa >> shift)); + result &= 0x0000FFFFU; + } + return static_cast(result); +} +#endif /* #if !defined(__CUDACC_RTC__) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a) +{ +IF_DEVICE_OR_CUDACC( + __half val; + asm("{ cvt.rn.f16.f64 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "d"(a)); + return val; +, + __half result; + // Perform rounding to 11 bits of precision, convert value + // to float and call existing float to half conversion. + // By pre-rounding to 11 bits we avoid additional rounding + // in float to half conversion. + unsigned long long int absa; + unsigned long long int ua; + (void)memcpy(&ua, &a, sizeof(a)); + absa = (ua & 0x7fffffffffffffffULL); + if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL)) + { + // |a| >= 2^16 or NaN or |a| <= 2^(-25) + // double-rounding is not a problem + result = __float2half(static_cast(a)); + } + else + { + // here 2^(-25) < |a| < 2^16 + // prepare shifter value such that a + shifter + // done in double precision performs round-to-nearest-even + // and (a + shifter) - shifter results in a rounded to + // 11 bits of precision. Shifter needs to have exponent of + // a plus 53 - 11 = 42 and a leading bit in mantissa to guard + // against negative values. + // So need to have |a| capped to avoid overflow in exponent. + // For inputs that are smaller than half precision minnorm + // we prepare fixed shifter exponent. + unsigned long long shifterBits; + if (absa >= 0x3f10000000000000ULL) + { // Here if |a| >= 2^(-14) + // add 42 to exponent bits + shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL; + } + else + { // 2^(-25) < |a| < 2^(-14), potentially results in denormal + // set exponent bits to 42 - 14 + bias + shifterBits = 0x41B0000000000000ULL; + } + // set leading mantissa bit to protect against negative inputs + shifterBits |= 0x0008000000000000ULL; + double shifter; + (void)memcpy(&shifter, &shifterBits, sizeof(shifterBits)); + double aShiftRound = a + shifter; + + // Prevent the compiler from optimizing away a + shifter - shifter + // by doing intermediate memcopy and harmless bitwize operation + unsigned long long int aShiftRoundBits; + (void)memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound)); + + // the value is positive, so this operation doesn't change anything + aShiftRoundBits &= 0x7fffffffffffffffULL; + + (void)memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound)); + + result = __float2half(static_cast(aShiftRound - shifter)); + } + + return result; +, + __half result; + /* + // Perform rounding to 11 bits of precision, convert value + // to float and call existing float to half conversion. + // By pre-rounding to 11 bits we avoid additional rounding + // in float to half conversion. 
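+    // For example (illustrative): for |a| near 1.0 the shifter built below
+    // is roughly 1.5 * 2^42, so in (a + shifter) only the top 53 - 42 = 11
+    // significant bits of a survive the round-to-nearest addition;
+    // subtracting the shifter back leaves a already rounded to half's
+    // 11-bit granularity, so the later double -> float -> half steps add
+    // no second inexact rounding.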
+ */ + unsigned long long int absa; + unsigned long long int ua; + (void)std::memcpy(&ua, &a, sizeof(a)); + absa = (ua & 0x7fffffffffffffffULL); + if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL)) + { + /* + // |a| >= 2^16 or NaN or |a| <= 2^(-25) + // double-rounding is not a problem + */ + result = __float2half(static_cast(a)); + } + else + { + /* + // here 2^(-25) < |a| < 2^16 + // prepare shifter value such that a + shifter + // done in double precision performs round-to-nearest-even + // and (a + shifter) - shifter results in a rounded to + // 11 bits of precision. Shifter needs to have exponent of + // a plus 53 - 11 = 42 and a leading bit in mantissa to guard + // against negative values. + // So need to have |a| capped to avoid overflow in exponent. + // For inputs that are smaller than half precision minnorm + // we prepare fixed shifter exponent. + */ + unsigned long long shifterBits; + if (absa >= 0x3f10000000000000ULL) + { + /* + // Here if |a| >= 2^(-14) + // add 42 to exponent bits + */ + shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL; + } + else + { + /* + // 2^(-25) < |a| < 2^(-14), potentially results in denormal + // set exponent bits to 42 - 14 + bias + */ + shifterBits = 0x41B0000000000000ULL; + } + // set leading mantissa bit to protect against negative inputs + shifterBits |= 0x0008000000000000ULL; + double shifter; + (void)std::memcpy(&shifter, &shifterBits, sizeof(shifterBits)); + double aShiftRound = a + shifter; + + /* + // Prevent the compiler from optimizing away a + shifter - shifter + // by doing intermediate memcopy and harmless bitwize operation + */ + unsigned long long int aShiftRoundBits; + (void)std::memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound)); + + // the value is positive, so this operation doesn't change anything + aShiftRoundBits &= 0x7fffffffffffffffULL; + + (void)std::memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound)); + + result = __float2half(static_cast(aShiftRound - shifter)); + } + + return result; +) +} + +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rz.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rm.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + 
unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder != 0U) && (sign != 0U)) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a) +{ + __half val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.rp.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +, + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder != 0U) && (sign == 0U)) { + r.x++; + } + val = r; +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a) +{ + __half2 val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low;\n" + " cvt.rn.f16.f32 low, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a)); +, + val = __half2(__float2half_rn(a), __float2half_rn(a)); +) + return val; +} + +#if defined(__CUDACC__) || defined(_NVHPC_CUDA) +__CUDA_FP16_DECL__ __half2 __internal_device_float2_to_half2_rn(const float a, const float b) { + __half2 val; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, + asm("{ cvt.rn.f16x2.f32 %0, %2, %1; }\n" + : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b)); +, + asm("{.reg .f16 low,high;\n" + " cvt.rn.f16.f32 low, %1;\n" + " cvt.rn.f16.f32 high, %2;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b)); +) + return val; +} + +#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b) +{ + __half2 val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + val = __internal_device_float2_to_half2_rn(a,b); +, + val = __half2(__float2half_rn(a), __float2half_rn(b)); +) + return val; +} + +#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */ +static inline float __internal_half2float(const unsigned short h) +{ + unsigned int sign = ((static_cast(h) >> 15U) & 1U); + unsigned int exponent = ((static_cast(h) >> 10U) & 0x1fU); + unsigned int mantissa = ((static_cast(h) & 0x3ffU) << 13U); + float f; + if (exponent == 0x1fU) { /* NaN or Inf */ + /* discard sign of a NaN */ + sign = ((mantissa != 0U) ? (sign >> 1U) : sign); + mantissa = ((mantissa != 0U) ? 
0x7fffffU : 0U); + exponent = 0xffU; + } else if (exponent == 0U) { /* Denorm or Zero */ + if (mantissa != 0U) { + unsigned int msb; + exponent = 0x71U; + do { + msb = (mantissa & 0x400000U); + mantissa <<= 1U; /* normalize */ + --exponent; + } while (msb == 0U); + mantissa &= 0x7fffffU; /* 1.mantissa is implicit */ + } + } else { + exponent += 0x70U; + } + const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa); +#if defined(__CUDACC__) + (void)memcpy(&f, &u, sizeof(u)); +#else + (void)std::memcpy(&f, &u, sizeof(u)); +#endif + return f; +} +#endif /* !defined(__CUDACC_RTC__) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a) +{ + float val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(a))); +, + val = __internal_half2float(static_cast<__half_raw>(a).x); +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a) +{ + float val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, low;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a))); +, + val = __internal_half2float(static_cast<__half2_raw>(a).x); +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a) +{ + float val; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, high;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a))); +, + val = __internal_half2float(static_cast<__half2_raw>(a).y); +) + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h) +{ + short int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const short int max_val = (short int)0x7fffU; + const short int min_val = (short int)0x8000U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h) +{ + unsigned short int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const unsigned short int max_val = 0xffffU; + const unsigned short int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h) +{ + int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const int max_val = (int)0x7fffffffU; + const int min_val = (int)0x80000000U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = 
max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h) +{ + unsigned int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const unsigned int max_val = 0xffffffffU; + const unsigned int min_val = 0U; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h) +{ + long long int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const long long int max_val = (long long int)0x7fffffffffffffffULL; + const long long int min_val = (long long int)0x8000000000000000ULL; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = min_val; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h) +{ + unsigned long long int i; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rzi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); +, + const float f = __half2float(h); + const unsigned long long int max_val = 0xffffffffffffffffULL; + const unsigned long long int min_val = 0ULL; + const unsigned short bits = static_cast(static_cast<__half_raw>(h).x << 1U); + // saturation fixup + if (bits > (unsigned short)0xF800U) { + // NaN + i = 0x8000000000000000ULL; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value, conversion is well-defined + i = static_cast(f); + } +) + return i; +} + +/* Intrinsic functions only available to nvcc compilers */ +#if defined(__CUDACC__) + +/* CUDA vector-types compatible vector creation function (note returns __half2, not half2) */ +__VECTOR_FUNCTIONS_DECL__ __half2 make_half2(const __half x, const __half y) +{ + __half2 t; t.x = x; t.y = y; return t; +} +#undef __VECTOR_FUNCTIONS_DECL__ + + +/* Definitions of intrinsics */ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a) +{ + const __half2 val = __floats2half2_rn(a.x, a.y); + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a) +{ + float hi_float; + float lo_float; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, low;}\n" : "=f"(lo_float) : "r"(__HALF2_TO_CUI(a))); + + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, high;}\n" : "=f"(hi_float) : "r"(__HALF2_TO_CUI(a))); +, + lo_float = __internal_half2float(((__half2_raw)a).x); + hi_float = 
__internal_half2float(((__half2_raw)a).y); +) + return make_float2(lo_float, hi_float); +} +__CUDA_FP16_DECL__ int __half2int_rn(const __half h) +{ + int i; + asm("cvt.rni.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ int __half2int_rd(const __half h) +{ + int i; + asm("cvt.rmi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ int __half2int_ru(const __half h) +{ + int i; + asm("cvt.rpi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __int2half_rz(const int i) +{ + __half h; + asm("cvt.rz.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __int2half_rd(const int i) +{ + __half h; + asm("cvt.rm.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __int2half_ru(const int i) +{ + __half h; + asm("cvt.rp.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} + +__CUDA_FP16_DECL__ short int __half2short_rn(const __half h) +{ + short int i; + asm("cvt.rni.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ short int __half2short_rd(const __half h) +{ + short int i; + asm("cvt.rmi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ short int __half2short_ru(const __half h) +{ + short int i; + asm("cvt.rpi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); +, + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __short2half_rz(const short int i) +{ + __half h; + asm("cvt.rz.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __short2half_rd(const short int i) +{ + __half h; + asm("cvt.rm.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __short2half_ru(const short int i) +{ + __half h; + asm("cvt.rp.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h) +{ + unsigned int i; + asm("cvt.rni.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h) +{ + unsigned int i; + asm("cvt.rmi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h) +{ + unsigned int i; + asm("cvt.rpi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. 
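+    // (Concretely: every unsigned int up to 2^24 = 16777216 converts to
+    // float exactly, and anything larger is already far above 65504, the
+    // largest finite half, so the result is +infinity no matter how the
+    // intermediate uint -> float rounding goes.)
+    // A minimal illustrative sketch, using only conversions declared in
+    // this header:
+    // \code
+    //   const __half h = __uint2half_rn(16777217U);  // needs 25 bits
+    //   const float  f = __half2float(h);            // +infinity either way
+    // \endcode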
+ const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i) +{ + __half h; + asm("cvt.rz.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i) +{ + __half h; + asm("cvt.rm.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i) +{ + __half h; + asm("cvt.rp.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h) +{ + unsigned short int i; + asm("cvt.rni.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h) +{ + unsigned short int i; + asm("cvt.rmi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h) +{ + unsigned short int i; + asm("cvt.rpi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); +, + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i) +{ + __half h; + asm("cvt.rz.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i) +{ + __half h; + asm("cvt.rm.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i) +{ + __half h; + asm("cvt.rp.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h) +{ + unsigned long long int i; + asm("cvt.rni.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h) +{ + unsigned long long int i; + asm("cvt.rmi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h) +{ + unsigned long long int i; + asm("cvt.rpi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. 
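+    // (Same reasoning as the 32-bit case above: an unsigned long long up
+    // to 2^24 still converts to float exactly, and anything larger is far
+    // beyond the finite half range, so the extra rounding in the
+    // 64-bit -> float conversion cannot change the +infinity result.)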
+ const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i) +{ + __half h; + asm("cvt.rz.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i) +{ + __half h; + asm("cvt.rm.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i) +{ + __half h; + asm("cvt.rp.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} + +__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h) +{ + long long int i; + asm("cvt.rni.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h) +{ + long long int i; + asm("cvt.rmi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h) +{ + long long int i; + asm("cvt.rpi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i) +{ + __half h; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm("cvt.rn.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); +, + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +) + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i) +{ + __half h; + asm("cvt.rz.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i) +{ + __half h; + asm("cvt.rm.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i) +{ + __half h; + asm("cvt.rp.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} + +__CUDA_FP16_DECL__ __half htrunc(const __half h) +{ + __half r; + asm("cvt.rzi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hceil(const __half h) +{ + __half r; + asm("cvt.rpi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hfloor(const __half h) +{ + __half r; + asm("cvt.rmi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hrint(const __half h) +{ + __half r; + asm("cvt.rni.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} + +__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rzi.f16.f16 low, low;\n" + " cvt.rzi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rpi.f16.f16 low, low;\n" + " cvt.rpi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rmi.f16.f16 low, low;\n" + " cvt.rmi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : 
"r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rni.f16.f16 low, low;\n" + " cvt.rni.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b) +{ + __half2 val; + asm("{.reg .f16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {alow,blow};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b))); + return val; +} +__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b) +{ + __half2 val; + asm("{.reg .f16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b))); + return val; +} +__CUDA_FP16_DECL__ __half __low2half(const __half2 a) +{ + __half ret; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, low;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a))); + return ret; +} +__CUDA_FP16_DECL__ int __hisinf(const __half a) +{ + int retval; + if (__HALF_TO_CUS(a) == 0xFC00U) { + retval = -1; + } else if (__HALF_TO_CUS(a) == 0x7C00U) { + retval = 1; + } else { + retval = 0; + } + return retval; +} +__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half __high2half(const __half2 a) +{ + __half ret; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, high;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a))); + return ret; +} +__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b) +{ + __half2 val; + asm("{ mov.b32 %0, {%1,%2};}\n" + : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); + return val; +} +__CUDA_FP16_DECL__ __half2 __half2half2(const __half a) +{ + __half2 val; + asm("{ mov.b32 %0, {%1,%1};}\n" + : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ short int __half_as_short(const __half h) +{ + return static_cast(__HALF_TO_CUS(h)); +} +__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h) +{ + return __HALF_TO_CUS(h); +} +__CUDA_FP16_DECL__ __half __short_as_half(const short int i) +{ + __half h; + __HALF_TO_US(h) = static_cast(i); + return h; +} +__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i) +{ + __half h; + __HALF_TO_US(h) = i; + return h; +} + +/****************************************************************************** +* __half arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b) +{ +#if 
!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF_MACRO(max) +#else + const float fa = __half2float(a); + const float fb = __half2float(b); + float fr; + asm("{max.f32 %0,%1,%2;\n}" + :"=f"(fr) : "f"(fa), "f"(fb)); + const __half hr = __float2half(fr); + return hr; +#endif +} +__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b) +{ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF_MACRO(min) +#else + const float fa = __half2float(a); + const float fb = __half2float(b); + float fr; + asm("{min.f32 %0,%1,%2;\n}" + :"=f"(fr) : "f"(fa), "f"(fb)); + const __half hr = __float2half(fr); + return hr; +#endif +} + +/****************************************************************************** +* __half2 arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b) +{ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF2_MACRO(max) +#else + const float2 fa = __half22float2(a); + const float2 fb = __half22float2(b); + float2 fr; + asm("{max.f32 %0,%1,%2;\n}" + :"=f"(fr.x) : "f"(fa.x), "f"(fb.x)); + asm("{max.f32 %0,%1,%2;\n}" + :"=f"(fr.y) : "f"(fa.y), "f"(fb.y)); + const __half2 hr = __float22half2_rn(fr); + return hr; +#endif +} +__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b) +{ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) + __BINARY_OP_HALF2_MACRO(min) +#else + const float2 fa = __half22float2(a); + const float2 fb = __half22float2(b); + float2 fr; + asm("{min.f32 %0,%1,%2;\n}" + :"=f"(fr.x) : "f"(fa.x), "f"(fb.x)); + asm("{min.f32 %0,%1,%2;\n}" + :"=f"(fr.y) : "f"(fa.y), "f"(fb.y)); + const __half2 hr = __float22half2_rn(fr); + return hr; +#endif +} + + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) || defined(_NVHPC_CUDA) +/****************************************************************************** +* __half, __half2 warp shuffle * +******************************************************************************/ +#define __SHUFFLE_HALF2_MACRO(name) /* do */ {\ + __half2 r; \ + asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3;\n}" \ + :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c)); \ + return r; \ +} /* while(0) */ + +#define __SHUFFLE_SYNC_HALF2_MACRO(name) /* do */ {\ + __half2 r; \ + asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3,%4;\n}" \ + :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \ + return r; \ +} /* while(0) */ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) + +__CUDA_FP16_DECL__ __half2 __shfl(const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.idx.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_HALF2_MACRO(shfl.up.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_down(const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.down.b32) +} +__CUDA_FP16_DECL__ __half2 
__shfl_xor(const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.bfly.b32) +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) */ + +__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.idx.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.up.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.down.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.bfly.b32) +} + +#undef __SHUFFLE_HALF2_MACRO +#undef __SHUFFLE_SYNC_HALF2_MACRO + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) + +__CUDA_FP16_DECL__ __half __shfl(const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_up(const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_up(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_down(const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_down(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_xor(const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_xor(temp1, delta, width); + return __low2half(temp2); +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) */ + +__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_up_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + 
const __half2 temp2 = __shfl_down_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_xor_sync(mask, temp1, delta, width); + return __low2half(temp2); +} + +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) || defined(_NVHPC_CUDA) */ +/****************************************************************************** +* __half and __half2 __ldg,__ldcg,__ldca,__ldcs * +******************************************************************************/ + +#if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) || defined(_NVHPC_CUDA)) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ +__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr) +{ + __half ret; + asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr) +{ + __half ret; + asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr) +{ + __half ret; + asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value) +{ + asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void 
__stcg(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value) +{ + asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value) +{ + asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value) +{ + asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +#undef __LDG_PTR +#endif /* defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) || defined(_NVHPC_CUDA)) */ +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) +/****************************************************************************** +* __half2 comparison * +******************************************************************************/ +#define __COMPARISON_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + return val; \ +} /* while(0) */ +__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.eq) +} +__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ne) +} +__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.le) +} +__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ge) +} +__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.lt) +} +__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.gt) +} +__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.equ) +} +__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.neu) +} +__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.leu) +} +__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.geu) +} +__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ltu) +} +__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.gtu) +} +#undef __COMPARISON_OP_HALF2_MACRO +/****************************************************************************** +* __half2 comparison with mask output * +******************************************************************************/ +#define __COMPARISON_OP_HALF2_MACRO_MASK(name) /* do */ {\ + unsigned val; \ + asm( "{ " __CUDA_FP16_STRINGIFY(name) ".u32.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + return val; \ +} /* while(0) */ +__CUDA_FP16_DECL__ 
unsigned __heq2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.eq) +} +__CUDA_FP16_DECL__ unsigned __hne2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.ne) +} +__CUDA_FP16_DECL__ unsigned __hle2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.le) +} +__CUDA_FP16_DECL__ unsigned __hge2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.ge) +} +__CUDA_FP16_DECL__ unsigned __hlt2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.lt) +} +__CUDA_FP16_DECL__ unsigned __hgt2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.gt) +} +__CUDA_FP16_DECL__ unsigned __hequ2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.equ) +} +__CUDA_FP16_DECL__ unsigned __hneu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.neu) +} +__CUDA_FP16_DECL__ unsigned __hleu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.leu) +} +__CUDA_FP16_DECL__ unsigned __hgeu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.geu) +} +__CUDA_FP16_DECL__ unsigned __hltu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.ltu) +} +__CUDA_FP16_DECL__ unsigned __hgtu2_mask(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO_MASK(set.gtu) +} +#undef __COMPARISON_OP_HALF2_MACRO_MASK +#define __BOOL_COMPARISON_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + bool retval; \ + asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + if (__HALF2_TO_CUI(val) == 0x3C003C00U) {\ + retval = true; \ + } else { \ + retval = false; \ + }\ + return retval;\ +} /* while(0) */ +__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.eq) +} +__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ne) +} +__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.le) +} +__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ge) +} +__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.lt) +} +__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.gt) +} +__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.equ) +} +__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.neu) +} +__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.leu) +} +__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.geu) +} +__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ltu) +} +__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.gtu) +} +#undef __BOOL_COMPARISON_OP_HALF2_MACRO +/****************************************************************************** +* __half comparison * +******************************************************************************/ +#define 
__COMPARISON_OP_HALF_MACRO(name) /* do */ {\ + unsigned short val; \ + asm( "{ .reg .pred __$temp3;\n" \ + " setp." __CUDA_FP16_STRINGIFY(name) ".f16 __$temp3, %1, %2;\n" \ + " selp.u16 %0, 1, 0, __$temp3;}" \ + : "=h"(val) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); \ + return (val != 0U) ? true : false; \ +} /* while(0) */ +__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(eq) +} +__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ne) +} +__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(le) +} +__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ge) +} +__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(lt) +} +__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(gt) +} +__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(equ) +} +__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(neu) +} +__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(leu) +} +__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(geu) +} +__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ltu) +} +__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(gtu) +} +#undef __COMPARISON_OP_HALF_MACRO +/****************************************************************************** +* __half2 arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add) +} +__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub) +} +__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul) +} +__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add.sat) +} +__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub.sat) +} +__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul.sat) +} +__CUDA_FP16_DECL__ __half2 __hadd2_rn(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add.rn) +} +__CUDA_FP16_DECL__ __half2 __hsub2_rn(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub.rn) +} +__CUDA_FP16_DECL__ __half2 __hmul2_rn(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul.rn) +} +__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn) +} +__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn.sat) +} +__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b) { + __half ha = __low2half(a); + __half hb = __low2half(b); + + const __half v1 = __hdiv(ha, hb); + + ha = __high2half(a); + hb = __high2half(b); + + const __half v2 = __hdiv(ha, hb); + + return __halves2half2(v1, v2); +} +/****************************************************************************** +* __half arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ 
__half __hadd(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add) +} +__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub) +} +__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul) +} +__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add.sat) +} +__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub.sat) +} +__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul.sat) +} +__CUDA_FP16_DECL__ __half __hadd_rn(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add.rn) +} +__CUDA_FP16_DECL__ __half __hsub_rn(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub.rn) +} +__CUDA_FP16_DECL__ __half __hmul_rn(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul.rn) +} +__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn) +} +__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn.sat) +} +__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b) { + __half v; + __half abs; + __half den; + __HALF_TO_US(den) = 0x008FU; + + float rcp; + const float fa = __half2float(a); + const float fb = __half2float(b); + + asm("{rcp.approx.ftz.f32 %0, %1;\n}" :"=f"(rcp) : "f"(fb)); + + float fv = rcp * fa; + + v = __float2half(fv); + abs = __habs(v); + if (__hlt(abs, den) && __hlt(__float2half(0.0f), abs)) { + const float err = __fmaf_rn(-fb, fv, fa); + fv = __fmaf_rn(rcp, err, fv); + v = __float2half(fv); + } + return v; +} + +/****************************************************************************** +* __half2 functions * +******************************************************************************/ +#define __SPEC_CASE2(i,r, spc, ulp) \ + "{.reg.b32 spc, ulp, p;\n"\ + " mov.b32 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\ + " mov.b32 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16x2.f16x2 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.f16x2 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n" +#define __SPEC_CASE(i,r, spc, ulp) \ + "{.reg.b16 spc, ulp, p;\n"\ + " mov.b16 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\ + " mov.b16 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\ + " set.eq.f16.f16 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\ + " fma.rn.f16 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n" +#define __APPROX_FCAST(fun) /* do */ {\ + __half val;\ + asm("{.reg.b32 f; \n"\ + " .reg.b16 r; \n"\ + " mov.b16 r,%1; \n"\ + " cvt.f32.f16 f,r; \n"\ + " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 f,f; \n"\ + " cvt.rn.f16.f32 r,f; \n"\ + " mov.b16 %0,r; \n"\ + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));\ + return val;\ +} /* while(0) */ +#define __APPROX_FCAST2(fun) /* do */ {\ + __half2 val;\ + asm("{.reg.b16 hl, hu; \n"\ + " .reg.b32 fl, fu; \n"\ + " mov.b32 {hl, hu}, %1; \n"\ + " cvt.f32.f16 fl, hl; \n"\ + " cvt.f32.f16 fu, hu; \n"\ + " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fl, fl; \n"\ + " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fu, fu; \n"\ + " cvt.rn.f16.f32 hl, fl; \n"\ + " cvt.rn.f16.f32 hu, fu; \n"\ + " mov.b32 %0, {hl, hu}; \n"\ + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); \ + return val;\ +} /* while(0) */ +static __device__ __forceinline__ float __float_simpl_sinf(float a); +static __device__ __forceinline__ float 
__float_simpl_cosf(float a); +__CUDA_FP16_DECL__ __half hsin(const __half a) { + const float sl = __float_simpl_sinf(__half2float(a)); + __half r = __float2half_rn(sl); + asm("{\n\t" + " .reg.b16 i,r,t; \n\t" + " mov.b16 r, %0; \n\t" + " mov.b16 i, %1; \n\t" + " and.b16 t, r, 0x8000U; \n\t" + " abs.f16 r, r; \n\t" + " abs.f16 i, i; \n\t" + __SPEC_CASE(i, r, 0X32B3U, 0x0800U) + __SPEC_CASE(i, r, 0X5CB0U, 0x9000U) + " or.b16 r,r,t; \n\t" + " mov.b16 %0, r; \n" + "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a) { + const float sl = __float_simpl_sinf(__half2float(a.x)); + const float sh = __float_simpl_sinf(__half2float(a.y)); + __half2 r = __floats2half2_rn(sl, sh); + asm("{\n\t" + " .reg.b32 i,r,t; \n\t" + " mov.b32 r, %0; \n\t" + " mov.b32 i, %1; \n\t" + " and.b32 t, r, 0x80008000U; \n\t" + " abs.f16x2 r, r; \n\t" + " abs.f16x2 i, i; \n\t" + __SPEC_CASE2(i, r, 0X32B332B3U, 0x08000800U) + __SPEC_CASE2(i, r, 0X5CB05CB0U, 0x90009000U) + " or.b32 r, r, t; \n\t" + " mov.b32 %0, r; \n" + "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half hcos(const __half a) { + const float cl = __float_simpl_cosf(__half2float(a)); + __half r = __float2half_rn(cl); + asm("{\n\t" + " .reg.b16 i,r; \n\t" + " mov.b16 r, %0; \n\t" + " mov.b16 i, %1; \n\t" + " abs.f16 i, i; \n\t" + __SPEC_CASE(i, r, 0X2B7CU, 0x1000U) + " mov.b16 %0, r; \n" + "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a) { + const float cl = __float_simpl_cosf(__half2float(a.x)); + const float ch = __float_simpl_cosf(__half2float(a.y)); + __half2 r = __floats2half2_rn(cl, ch); + asm("{\n\t" + " .reg.b32 i,r; \n\t" + " mov.b32 r, %0; \n\t" + " mov.b32 i, %1; \n\t" + " abs.f16x2 i, i; \n\t" + __SPEC_CASE2(i, r, 0X2B7C2B7CU, 0x10001000U) + " mov.b32 %0, r; \n" + "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +static __device__ __forceinline__ float __internal_trig_reduction_kernel(const float a, unsigned int *const quadrant) +{ + const float ar = __fmaf_rn(a, 0.636619772F, 12582912.0F); + const unsigned q = __float_as_uint(ar); + const float j = __fsub_rn(ar, 12582912.0F); + float t = __fmaf_rn(j, -1.5707962512969971e+000F, a); + t = __fmaf_rn(j, -7.5497894158615964e-008F, t); + *quadrant = q; + return t; +} +static __device__ __forceinline__ float __internal_sin_cos_kernel(const float x, const unsigned int i) +{ + float z; + const float x2 = x*x; + float a8; + float a6; + float a4; + float a2; + float a1; + float a0; + + if ((i & 1U) != 0U) { + // cos + a8 = 2.44331571e-5F; + a6 = -1.38873163e-3F; + a4 = 4.16666457e-2F; + a2 = -5.00000000e-1F; + a1 = x2; + a0 = 1.0F; + } + else { + // sin + a8 = -1.95152959e-4F; + a6 = 8.33216087e-3F; + a4 = -1.66666546e-1F; + a2 = 0.0F; + a1 = x; + a0 = x; + } + + z = __fmaf_rn(a8, x2, a6); + z = __fmaf_rn(z, x2, a4); + z = __fmaf_rn(z, x2, a2); + z = __fmaf_rn(z, a1, a0); + + if ((i & 2U) != 0U) { + z = -z; + } + return z; +} +static __device__ __forceinline__ float __float_simpl_sinf(float a) +{ + float z; + unsigned i; + a = __internal_trig_reduction_kernel(a, &i); + z = __internal_sin_cos_kernel(a, i); + return z; +} +static __device__ __forceinline__ float __float_simpl_cosf(float a) +{ + float z; + unsigned i; + a = __internal_trig_reduction_kernel(a, &i); + z = __internal_sin_cos_kernel(a, (i & 0x3U) + 1U); + return z; +} + +__CUDA_FP16_DECL__ __half hexp(const __half a) { + __half val; + 
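+    // The asm block below evaluates exp(x) as 2^(x * log2(e)):
+    // 0x3fb8aa3bU is the float bit pattern of log2(e) ~= 1.4426950f and
+    // 0x80000000U is -0.0f, so the fma computes f * log2(e), after which
+    // ex2.approx.ftz.f32 yields the power of two. The __SPEC_CASE lines nudge
+    // the result by a small half-precision correction for a handful of
+    // special-case inputs where the raw approximation is slightly off.
+    // A rough float-side equivalent (illustrative only) is:
+    //     __float2half_rn(exp2f(__half2float(a) * 1.4426950f))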
asm("{.reg.b32 f, C, nZ; \n" + " .reg.b16 h,r; \n" + " mov.b16 h,%1; \n" + " cvt.f32.f16 f,h; \n" + " mov.b32 C, 0x3fb8aa3bU; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 f,f,C,nZ; \n" + " ex2.approx.ftz.f32 f,f; \n" + " cvt.rn.f16.f32 r,f; \n" + __SPEC_CASE(h, r, 0X1F79U, 0x9400U) + __SPEC_CASE(h, r, 0X25CFU, 0x9400U) + __SPEC_CASE(h, r, 0XC13BU, 0x0400U) + __SPEC_CASE(h, r, 0XC1EFU, 0x0200U) + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu,C,nZ; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " mov.b32 C, 0x3fb8aa3bU; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 fl,fl,C,nZ; \n" + " fma.rn.f32 fu,fu,C,nZ; \n" + " ex2.approx.ftz.f32 fl, fl; \n" + " ex2.approx.ftz.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0X1F791F79U, 0x94009400U) + __SPEC_CASE2(h, r, 0X25CF25CFU, 0x94009400U) + __SPEC_CASE2(h, r, 0XC13BC13BU, 0x04000400U) + __SPEC_CASE2(h, r, 0XC1EFC1EFU, 0x02000200U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hexp2(const __half a) { + __half val; + asm("{.reg.b32 f, ULP; \n" + " .reg.b16 r; \n" + " mov.b16 r,%1; \n" + " cvt.f32.f16 f,r; \n" + " ex2.approx.ftz.f32 f,f; \n" + " mov.b32 ULP, 0x33800000U;\n" + " fma.rn.f32 f,f,ULP,f; \n" + " cvt.rn.f16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 fl, fu, ULP; \n" + " mov.b32 {hl, hu}, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " ex2.approx.ftz.f32 fl, fl; \n" + " ex2.approx.ftz.f32 fu, fu; \n" + " mov.b32 ULP, 0x33800000U;\n" + " fma.rn.f32 fl,fl,ULP,fl; \n" + " fma.rn.f32 fu,fu,ULP,fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 %0, {hl, hu}; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hexp10(const __half a) { + __half val; + asm("{.reg.b16 h,r; \n" + " .reg.b32 f, C, nZ; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " mov.b32 C, 0x40549A78U; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 f,f,C,nZ; \n" + " ex2.approx.ftz.f32 f, f; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(h, r, 0x34DEU, 0x9800U) + __SPEC_CASE(h, r, 0x9766U, 0x9000U) + __SPEC_CASE(h, r, 0x9972U, 0x1000U) + __SPEC_CASE(h, r, 0xA5C4U, 0x1000U) + __SPEC_CASE(h, r, 0xBF0AU, 0x8100U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu,C,nZ; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " mov.b32 C, 0x40549A78U; \n" + " mov.b32 nZ, 0x80000000U;\n" + " fma.rn.f32 fl,fl,C,nZ; \n" + " fma.rn.f32 fu,fu,C,nZ; \n" + " ex2.approx.ftz.f32 fl, fl; \n" + " ex2.approx.ftz.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0x34DE34DEU, 0x98009800U) + __SPEC_CASE2(h, r, 0x97669766U, 0x90009000U) + __SPEC_CASE2(h, r, 0x99729972U, 0x10001000U) + __SPEC_CASE2(h, r, 0xA5C4A5C4U, 
0x10001000U) + __SPEC_CASE2(h, r, 0xBF0ABF0AU, 0x81008100U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog2(const __half a) { + __half val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " lg2.approx.ftz.f32 f, f; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(r, r, 0xA2E2U, 0x8080U) + __SPEC_CASE(r, r, 0xBF46U, 0x9400U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 fl, fu, r, p; \n" + " mov.b32 {hl, hu}, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.ftz.f32 fl, fl; \n" + " lg2.approx.ftz.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(r, r, 0xA2E2A2E2U, 0x80808080U) + __SPEC_CASE2(r, r, 0xBF46BF46U, 0x94009400U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog(const __half a) { + __half val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 r,h; \n" + " mov.b16 h,%1; \n" + " cvt.f32.f16 f,h; \n" + " lg2.approx.ftz.f32 f,f; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.f16.f32 r,f; \n" + __SPEC_CASE(h, r, 0X160DU, 0x9C00U) + __SPEC_CASE(h, r, 0X3BFEU, 0x8010U) + __SPEC_CASE(h, r, 0X3C0BU, 0x8080U) + __SPEC_CASE(h, r, 0X6051U, 0x1C00U) + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.ftz.f32 fl, fl; \n" + " lg2.approx.ftz.f32 fu, fu; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0X160D160DU, 0x9C009C00U) + __SPEC_CASE2(h, r, 0X3BFE3BFEU, 0x80108010U) + __SPEC_CASE2(h, r, 0X3C0B3C0BU, 0x80808080U) + __SPEC_CASE2(h, r, 0X60516051U, 0x1C001C00U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog10(const __half a) { + __half val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " lg2.approx.ftz.f32 f, f; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(h, r, 0x338FU, 0x1000U) + __SPEC_CASE(h, r, 0x33F8U, 0x9000U) + __SPEC_CASE(h, r, 0x57E1U, 0x9800U) + __SPEC_CASE(h, r, 0x719DU, 0x9C00U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.ftz.f32 fl, fl; \n" + " lg2.approx.ftz.f32 fu, fu; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0x338F338FU, 0x10001000U) + __SPEC_CASE2(h, r, 0x33F833F8U, 0x90009000U) + __SPEC_CASE2(h, r, 
0x57E157E1U, 0x98009800U) + __SPEC_CASE2(h, r, 0x719D719DU, 0x9C009C00U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +#undef __SPEC_CASE2 +#undef __SPEC_CASE +__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a) { + __APPROX_FCAST2(rcp) +} +__CUDA_FP16_DECL__ __half hrcp(const __half a) { + __APPROX_FCAST(rcp) +} +__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a) { + __APPROX_FCAST2(rsqrt) +} +__CUDA_FP16_DECL__ __half hrsqrt(const __half a) { + __APPROX_FCAST(rsqrt) +} +__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a) { + __APPROX_FCAST2(sqrt) +} +__CUDA_FP16_DECL__ __half hsqrt(const __half a) { + __APPROX_FCAST(sqrt) +} +#undef __APPROX_FCAST +#undef __APPROX_FCAST2 +__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a) +{ + __half2 r; + asm("{set.nan.f16x2.f16x2 %0,%1,%2;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ bool __hisnan(const __half a) +{ + __half r; + asm("{set.nan.f16.f16 %0,%1,%2;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(a))); + return __HALF_TO_CUS(r) != 0U; +} +__CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a) +{ + __half2 r; + asm("{neg.f16x2 %0,%1;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __hneg(const __half a) +{ + __half r; + asm("{neg.f16 %0,%1;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a) +{ + __half2 r; + asm("{abs.f16x2 %0,%1;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __habs(const __half a) +{ + __half r; + asm("{abs.f16 %0,%1;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} + +__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c) +{ + // fast version of complex multiply-accumulate + // (a.re, a.im) * (b.re, b.im) + (c.re, c.im) + // acc.re = (c.re + a.re*b.re) - a.im*b.im + // acc.im = (c.im + a.re*b.im) + a.im*b.re + __half real_tmp = __hfma(a.x, b.x, c.x); + __half img_tmp = __hfma(a.x, b.y, c.y); + real_tmp = __hfma(__hneg(a.y), b.y, real_tmp); + img_tmp = __hfma(a.y, b.x, img_tmp); + return make_half2(real_tmp, img_tmp); +} + +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) */ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800) +__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(max.NaN) +} +__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(min.NaN) +} +__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn.relu) +} + +__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(max.NaN) +} +__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(min.NaN) +} +__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn.relu) +} +#endif /*defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)*/ + +/* Define __PTR for atomicAdd prototypes below, undef after done */ +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __PTR "l" +#else +#define __PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || 
defined(__CUDACC_RTC__)*/ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600) + +__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val) { + __half2 r; + asm volatile ("{ atom.add.noftz.f16x2 %0,[%1],%2; }\n" + : "=r"(__HALF2_TO_UI(r)) : __PTR(address), "r"(__HALF2_TO_CUI(val)) + : "memory"); + return r; +} + +#endif /*defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)*/ + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) + +__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val) { + __half r; + asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n" + : "=h"(__HALF_TO_US(r)) + : __PTR(address), "h"(__HALF_TO_CUS(val)) + : "memory"); + return r; +} + +#endif /*defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)*/ + +#undef __PTR + +#undef __CUDA_FP16_DECL__ +#endif /* defined(__CUDACC__) */ +#endif /* defined(__cplusplus) */ + +#undef __TERNARY_OP_HALF2_MACRO +#undef __TERNARY_OP_HALF_MACRO +#undef __BINARY_OP_HALF2_MACRO +#undef __BINARY_OP_HALF_MACRO + +#undef __CUDA_HOSTDEVICE_FP16_DECL__ +#undef __CUDA_FP16_DECL__ + +#undef __HALF_TO_US +#undef __HALF_TO_CUS +#undef __HALF2_TO_UI +#undef __HALF2_TO_CUI + +/* Define first-class types "half" and "half2", unless user specifies otherwise via "#define CUDA_NO_HALF" */ +/* C cannot ever have these types defined here, because __half and __half2 are C++ classes */ +#if defined(__cplusplus) && !defined(CUDA_NO_HALF) +typedef __half half; +typedef __half2 half2; +// for consistency with __nv_bfloat16 +typedef __half __nv_half; +typedef __half2 __nv_half2; +typedef __half_raw __nv_half_raw; +typedef __half2_raw __nv_half2_raw; +typedef __half nv_half; +typedef __half2 nv_half2; +#endif /* defined(__cplusplus) && !defined(CUDA_NO_HALF) */ + +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) +#undef __CPP_VERSION_AT_LEAST_11_FP16 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + +#endif /* end of include guard: __CUDA_FP16_HPP__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h new file mode 100644 index 0000000000000000000000000000000000000000..d47c9a9100b13192e1e6376001a4989e9c077340 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h @@ -0,0 +1,367 @@ +/* + * Copyright 2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef __CUDA_FP8_H__ +#define __CUDA_FP8_H__ + +/* Set up function decorations */ +#if defined(__CUDACC__) +#define __CUDA_FP8_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_FP8__ __host__ __device__ +#define __CUDA_HOSTDEVICE_FP8_DECL__ static __host__ __device__ __inline__ +#else /* !defined(__CUDACC__) */ +#if defined(__GNUC__) +#define __CUDA_HOSTDEVICE_FP8_DECL__ static __attribute__((unused)) +#else +#define __CUDA_HOSTDEVICE_FP8_DECL__ static +#endif /* defined(__GNUC__) */ +#define __CUDA_HOSTDEVICE_FP8__ +#endif /* defined(__CUDACC_) */ + +#if !defined(_MSC_VER) && __cplusplus >= 201103L +#define __CPP_VERSION_AT_LEAST_11_FP8 +#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L +#define __CPP_VERSION_AT_LEAST_11_FP8 +#endif + +/* bring in __half_raw data type */ +#include "cuda_fp16.h" +/* bring in __nv_bfloat16_raw data type */ +#include "cuda_bf16.h" +/* bring in float2, double4, etc vector types */ +#include "vector_types.h" + +/** + * \defgroup CUDA_MATH_INTRINSIC_FP8 FP8 Intrinsics + * This section describes fp8 intrinsic functions. + * To use these functions, include the header file \p cuda_fp8.h in your + * program. + * The following macros are available to help users selectively enable/disable + * various definitions present in the header file: + * - \p __CUDA_NO_FP8_CONVERSIONS__ - If defined, this macro will prevent any + * use of the C++ type conversions (converting constructors and conversion + * operators) defined in the header. + * - \p __CUDA_NO_FP8_CONVERSION_OPERATORS__ - If defined, this macro will + * prevent any use of the C++ conversion operators from \p fp8 to other types. + */ + +/** + * \defgroup CUDA_MATH_FP8_MISC FP8 Conversion and Data Movement + * \ingroup CUDA_MATH_INTRINSIC_FP8 + * To use these functions, include the header file \p cuda_fp8.h in your + * program. + */ + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief 8-bit \p unsigned \p integer + * type abstraction used to for \p fp8 floating-point + * numbers storage. 
+ */ +typedef unsigned char __nv_fp8_storage_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief 16-bit \p unsigned \p integer + * type abstraction used to for storage of pairs of + * \p fp8 floating-point numbers. + */ +typedef unsigned short int __nv_fp8x2_storage_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief 32-bit \p unsigned \p integer + * type abstraction used to for storage of tetrads of + * \p fp8 floating-point numbers. + */ +typedef unsigned int __nv_fp8x4_storage_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Enumerates the modes applicable when + * performing a narrowing conversion to \p fp8 destination types. + */ +typedef enum __nv_saturation_t { + /** + * Means no saturation to finite is performed when conversion + * results in rounding values outside the range of destination + * type. + * NOTE: for fp8 type of e4m3 kind, the results that are larger + * than the maximum representable finite number of the target + * format become NaN. + */ + __NV_NOSAT, + /** + * Means input larger than the maximum representable + * finite number MAXNORM of the target format round to the + * MAXNORM of the same sign as input. + */ + __NV_SATFINITE, +} __nv_saturation_t; + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Enumerates the possible + * interpretations of the 8-bit values when referring to them as + * \p fp8 types. + */ +typedef enum __nv_fp8_interpretation_t { + __NV_E4M3, /**< Stands for \p fp8 numbers of \p e4m3 kind. */ + __NV_E5M2, /**< Stands for \p fp8 numbers of \p e5m2 kind. */ +} __nv_fp8_interpretation_t; + +/* Forward-declaration of C-style APIs */ + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p double precision \p x to \p fp8 type of the + * requested kind using round-to-nearest-even rounding and requested saturation + * mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. + * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p double precision numbers packed + * in \p double2 \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p single precision \p x to \p fp8 type of the + * requested kind using round-to-nearest-even rounding and requested saturation + * mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. 
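+ *
+ * For example, converting a \p float value to an \p e4m3 encoding with
+ * saturation to the finite range (illustrative usage only):
+ * \code
+ * __nv_fp8_storage_t v = __nv_cvt_float_to_fp8(1.5f, __NV_SATFINITE, __NV_E4M3);
+ * \endcode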
+ * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p single precision numbers packed + * in \p float2 \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p half precision \p x to \p fp8 type of the requested + * kind using round-to-nearest-even rounding and requested saturation mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. + * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p half precision numbers packed + * in \p __half2_raw \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2( + const __half2_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p nv_bfloat16 precision \p x to \p fp8 type of the + * requested kind using round-to-nearest-even rounding and requested saturation + * mode. + * + * \details Converts input \p x to \p fp8 type of the kind specified by + * \p fp8_interpretation parameter, + * using round-to-nearest-even rounding and + * saturation mode specified by \p saturate parameter. + * + * \returns + * - The \p __nv_fp8_storage_t value holds the result of conversion. 
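+ *
+ * For example (illustrative only; 0x3FC0U is the \p bfloat16 encoding of 1.5):
+ * \code
+ * __nv_bfloat16_raw br; br.x = 0x3FC0U;
+ * __nv_fp8_storage_t v = __nv_cvt_bfloat16raw_to_fp8(br, __NV_SATFINITE, __NV_E4M3);
+ * \endcode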
+ */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8( + const __nv_bfloat16_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p nv_bfloat16 precision numbers packed + * in \p __nv_bfloat162_raw \p x into a vector of two values of \p fp8 type of + * the requested kind using round-to-nearest-even rounding and requested + * saturation mode. + * + * \details Converts input vector \p x to a vector of two \p fp8 values of the + * kind specified by \p fp8_interpretation parameter, using + * round-to-nearest-even rounding and saturation mode specified by \p saturate + * parameter. + * + * \returns + * - The \p __nv_fp8x2_storage_t value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_bfloat16raw2_to_fp8x2( + const __nv_bfloat162_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation); + +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input \p fp8 \p x of the specified kind + * to \p half precision. + * + * \details Converts input \p x of \p fp8 type of the kind specified by + * \p fp8_interpretation parameter + * to \p half precision. + * + * \returns + * - The \p __half_raw value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __half_raw +__nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation); +/** + * \ingroup CUDA_MATH_FP8_MISC + * \brief Converts input vector of two \p fp8 values of the specified kind + * to a vector of two \p half precision values packed in \p __half2_raw + * structure. + * + * \details Converts input vector \p x of \p fp8 type of the kind specified by + * \p fp8_interpretation parameter + * to a vector of two \p half precision values and returns as \p __half2_raw + * structure. + * + * \returns + * - The \p __half2_raw value holds the result of conversion. + */ +__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw +__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation); + +#if defined(__cplusplus) + +#define __CUDA_FP8_TYPES_EXIST__ + +/* Forward-declaration of structures defined in "cuda_fp8.hpp" */ +struct __nv_fp8_e5m2; +struct __nv_fp8x2_e5m2; +struct __nv_fp8x4_e5m2; + +struct __nv_fp8_e4m3; +struct __nv_fp8x2_e4m3; +struct __nv_fp8x4_e4m3; + +#endif /* defined(__cplusplus) */ + +#include "cuda_fp8.hpp" + +#undef __CUDA_FP8_DECL__ +#undef __CUDA_HOSTDEVICE_FP8__ +#undef __CUDA_HOSTDEVICE_FP8_DECL__ + +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) +#undef __CPP_VERSION_AT_LEAST_11_FP8 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#endif /* end of include guard: __CUDA_FP8_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe55709f8ccdebf7341180f043006b68c08e104 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h @@ -0,0 +1,1958 @@ +/* + * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/** + * CUDA Occupancy Calculator + * + * NAME + * + * cudaOccMaxActiveBlocksPerMultiprocessor, + * cudaOccMaxPotentialOccupancyBlockSize, + * cudaOccMaxPotentialOccupancyBlockSizeVariableSMem + * cudaOccAvailableDynamicSMemPerBlock + * + * DESCRIPTION + * + * The CUDA occupancy calculator provides a standalone, programmatical + * interface to compute the occupancy of a function on a device. It can also + * provide occupancy-oriented launch configuration suggestions. + * + * The function and device are defined by the user through + * cudaOccFuncAttributes, cudaOccDeviceProp, and cudaOccDeviceState + * structures. All APIs require all 3 of them. + * + * See the structure definition for more details about the device / function + * descriptors. + * + * See each API's prototype for API usage. + * + * COMPATIBILITY + * + * The occupancy calculator will be updated on each major CUDA toolkit + * release. It does not provide forward compatibility, i.e. new hardwares + * released after this implementation's release will not be supported. + * + * NOTE + * + * If there is access to CUDA runtime, and the sole intent is to calculate + * occupancy related values on one of the accessible CUDA devices, using CUDA + * runtime's occupancy calculation APIs is recommended. 
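+ *
+ *     For example, the runtime counterparts of the two main queries are
+ *     (illustrative only; "kernel" stands in for any __global__ function):
+ *
+ *         int numBlocks = 0;
+ *         cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel, 256, 0);
+ *
+ *         int minGridSize = 0, blockSize = 0;
+ *         cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel, 0, 0);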
+ * + */ + +#ifndef __cuda_occupancy_h__ +#define __cuda_occupancy_h__ + +#include +#include +#include + + +// __OCC_INLINE will be undefined at the end of this header +// +#ifdef __CUDACC__ +#define __OCC_INLINE inline __host__ __device__ +#elif defined _MSC_VER +#define __OCC_INLINE __inline +#else // GNUCC assumed +#define __OCC_INLINE inline +#endif + +enum cudaOccError_enum { + CUDA_OCC_SUCCESS = 0, // no error encountered + CUDA_OCC_ERROR_INVALID_INPUT = 1, // input parameter is invalid + CUDA_OCC_ERROR_UNKNOWN_DEVICE = 2, // requested device is not supported in + // current implementation or device is + // invalid +}; +typedef enum cudaOccError_enum cudaOccError; + +typedef struct cudaOccResult cudaOccResult; +typedef struct cudaOccDeviceProp cudaOccDeviceProp; +typedef struct cudaOccFuncAttributes cudaOccFuncAttributes; +typedef struct cudaOccDeviceState cudaOccDeviceState; + +/** + * The CUDA occupancy calculator computes the occupancy of the function + * described by attributes with the given block size (blockSize), static device + * properties (properties), dynamic device states (states) and per-block dynamic + * shared memory allocation (dynamicSMemSize) in bytes, and output it through + * result along with other useful information. The occupancy is computed in + * terms of the maximum number of active blocks per multiprocessor. The user can + * then convert it to other metrics, such as number of active warps. + * + * RETURN VALUE + * + * The occupancy and related information is returned through result. + * + * If result->activeBlocksPerMultiprocessor is 0, then the given parameter + * combination cannot run on the device. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + */ +static __OCC_INLINE +cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor( + cudaOccResult *result, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + int blockSize, // in + size_t dynamicSmemSize); // in + +/** + * The CUDA launch configurator C API suggests a grid / block size pair (in + * minGridSize and blockSize) that achieves the best potential occupancy + * (i.e. maximum number of active warps with the smallest number of blocks) for + * the given function described by attributes, on a device described by + * properties with settings in state. + * + * If per-block dynamic shared memory allocation is not needed, the user should + * leave both blockSizeToDynamicSMemSize and dynamicSMemSize as 0. + * + * If per-block dynamic shared memory allocation is needed, then if the dynamic + * shared memory size is constant regardless of block size, the size should be + * passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be + * NULL. + * + * Otherwise, if the per-block dynamic shared memory size varies with different + * block sizes, the user needs to provide a pointer to an unary function through + * blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by + * a block of the function for any given block size. dynamicSMemSize is + * ignored. 
An example signature is: + * + * // Take block size, returns dynamic shared memory needed + * size_t blockToSmem(int blockSize); + * + * RETURN VALUE + * + * The suggested block size and the minimum number of blocks needed to achieve + * the maximum occupancy are returned through blockSize and minGridSize. + * + * If *blockSize is 0, then the given combination cannot run on the device. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + * + */ +static __OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, // out + int *blockSize, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + size_t (*blockSizeToDynamicSMemSize)(int), // in + size_t dynamicSMemSize); // in + +/** + * The CUDA launch configurator C++ API suggests a grid / block size pair (in + * minGridSize and blockSize) that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number of blocks) + * for the given function described by attributes, on a device described by + * properties with settings in state. + * + * If per-block dynamic shared memory allocation is 0 or constant regardless of + * block size, the user can use cudaOccMaxPotentialOccupancyBlockSize to + * configure the launch. A constant dynamic shared memory allocation size in + * bytes can be passed through dynamicSMemSize. + * + * Otherwise, if the per-block dynamic shared memory size varies with different + * block sizes, the user needs to use + * cudaOccMaxPotentialOccupancyBlockSizeVariableSmem instead, and provide a + * functor / pointer to an unary function (blockSizeToDynamicSMemSize) that + * computes the dynamic shared memory needed by func for any given block + * size. An example signature is: + * + * // Take block size, returns per-block dynamic shared memory needed + * size_t blockToSmem(int blockSize); + * + * RETURN VALUE + * + * The suggested block size and the minimum number of blocks needed to achieve + * the maximum occupancy are returned through blockSize and minGridSize. + * + * If *blockSize is 0, then the given combination cannot run on the device. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + * + */ + +#if defined(__cplusplus) +namespace { + +__OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, // out + int *blockSize, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + size_t dynamicSMemSize = 0); // in + +template +__OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem( + int *minGridSize, // out + int *blockSize, // out + const cudaOccDeviceProp *properties, // in + const cudaOccFuncAttributes *attributes, // in + const cudaOccDeviceState *state, // in + UnaryFunction blockSizeToDynamicSMemSize); // in + +} // namespace anonymous +#endif // defined(__cplusplus) + +/** + * + * The CUDA dynamic shared memory calculator computes the maximum size of + * per-block dynamic shared memory if we want to place numBlocks blocks + * on an SM. 
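+ *
+ * For example, to query how much dynamic shared memory is still available when
+ * targeting 2 blocks of 256 threads per SM (illustrative only; occProp, occAttr
+ * and occState stand for an initialized cudaOccDeviceProp, cudaOccFuncAttributes
+ * and cudaOccDeviceState):
+ *
+ *     size_t smem = 0;
+ *     cudaOccAvailableDynamicSMemPerBlock(&smem, &occProp, &occAttr, &occState, 2, 256);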
+ * + * RETURN VALUE + * + * Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow + * numBlocks blocks per SM. + * + * ERRORS + * + * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid. + * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in + * current implementation or device is invalid + * + */ +static __OCC_INLINE +cudaOccError cudaOccAvailableDynamicSMemPerBlock( + size_t *dynamicSmemSize, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int numBlocks, + int blockSize); + +/** + * Data structures + * + * These structures are subject to change for future architecture and CUDA + * releases. C users should initialize the structure as {0}. + * + */ + +/** + * Device descriptor + * + * This structure describes a device. + */ +struct cudaOccDeviceProp { + int computeMajor; // Compute capability major version + int computeMinor; // Compute capability minor + // version. None supported minor version + // may cause error + int maxThreadsPerBlock; // Maximum number of threads per block + int maxThreadsPerMultiprocessor; // Maximum number of threads per SM + // i.e. (Max. number of warps) x (warp + // size) + int regsPerBlock; // Maximum number of registers per block + int regsPerMultiprocessor; // Maximum number of registers per SM + int warpSize; // Warp size + size_t sharedMemPerBlock; // Maximum shared memory size per block + size_t sharedMemPerMultiprocessor; // Maximum shared memory size per SM + int numSms; // Number of SMs available + size_t sharedMemPerBlockOptin; // Maximum optin shared memory size per block + size_t reservedSharedMemPerBlock; // Shared memory per block reserved by driver + +#ifdef __cplusplus + // This structure can be converted from a cudaDeviceProp structure for users + // that use this header in their CUDA applications. + // + // If the application have access to the CUDA Runtime API, the application + // can obtain the device properties of a CUDA device through + // cudaGetDeviceProperties, and initialize a cudaOccDeviceProp with the + // cudaDeviceProp structure. + // + // Example: + /* + { + cudaDeviceProp prop; + + cudaGetDeviceProperties(&prop, ...); + + cudaOccDeviceProp occProp = prop; + + ... 
+ + cudaOccMaxPotentialOccupancyBlockSize(..., &occProp, ...); + } + */ + // + template + __OCC_INLINE + cudaOccDeviceProp(const DeviceProp &props) + : computeMajor (props.major), + computeMinor (props.minor), + maxThreadsPerBlock (props.maxThreadsPerBlock), + maxThreadsPerMultiprocessor (props.maxThreadsPerMultiProcessor), + regsPerBlock (props.regsPerBlock), + regsPerMultiprocessor (props.regsPerMultiprocessor), + warpSize (props.warpSize), + sharedMemPerBlock (props.sharedMemPerBlock), + sharedMemPerMultiprocessor (props.sharedMemPerMultiprocessor), + numSms (props.multiProcessorCount), + sharedMemPerBlockOptin (props.sharedMemPerBlockOptin), + reservedSharedMemPerBlock (props.reservedSharedMemPerBlock) + {} + + __OCC_INLINE + cudaOccDeviceProp() + : computeMajor (0), + computeMinor (0), + maxThreadsPerBlock (0), + maxThreadsPerMultiprocessor (0), + regsPerBlock (0), + regsPerMultiprocessor (0), + warpSize (0), + sharedMemPerBlock (0), + sharedMemPerMultiprocessor (0), + numSms (0), + sharedMemPerBlockOptin (0), + reservedSharedMemPerBlock (0) + {} +#endif // __cplusplus +}; + +/** + * Partitioned global caching option + */ +typedef enum cudaOccPartitionedGCConfig_enum { + PARTITIONED_GC_OFF, // Disable partitioned global caching + PARTITIONED_GC_ON, // Prefer partitioned global caching + PARTITIONED_GC_ON_STRICT // Force partitioned global caching +} cudaOccPartitionedGCConfig; + +/** + * Per function opt in maximum dynamic shared memory limit + */ +typedef enum cudaOccFuncShmemConfig_enum { + FUNC_SHMEM_LIMIT_DEFAULT, // Default shmem limit + FUNC_SHMEM_LIMIT_OPTIN, // Use the optin shmem limit +} cudaOccFuncShmemConfig; + +/** + * Function descriptor + * + * This structure describes a CUDA function. + */ +struct cudaOccFuncAttributes { + int maxThreadsPerBlock; // Maximum block size the function can work with. If + // unlimited, use INT_MAX or any value greater than + // or equal to maxThreadsPerBlock of the device + int numRegs; // Number of registers used. When the function is + // launched on device, the register count may change + // due to internal tools requirements. + size_t sharedSizeBytes; // Number of static shared memory used + + cudaOccPartitionedGCConfig partitionedGCConfig; + // Partitioned global caching is required to enable + // caching on certain chips, such as sm_52 + // devices. Partitioned global caching can be + // automatically disabled if the occupancy + // requirement of the launch cannot support caching. + // + // To override this behavior with caching on and + // calculate occupancy strictly according to the + // preference, set partitionedGCConfig to + // PARTITIONED_GC_ON_STRICT. This is especially + // useful for experimenting and finding launch + // configurations (MaxPotentialOccupancyBlockSize) + // that allow global caching to take effect. + // + // This flag only affects the occupancy calculation. + + cudaOccFuncShmemConfig shmemLimitConfig; + // Certain chips like sm_70 allow a user to opt into + // a higher per block limit of dynamic shared memory + // This optin is performed on a per function basis + // using the cuFuncSetAttribute function + + size_t maxDynamicSharedSizeBytes; + // User set limit on maximum dynamic shared memory + // usable by the kernel + // This limit is set using the cuFuncSetAttribute + // function. 
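+        // Runtime-API equivalent (illustrative only; "kernel" is a placeholder):
+        //   cudaFuncSetAttribute(kernel,
+        //       cudaFuncAttributeMaxDynamicSharedMemorySize, 64 * 1024);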
+ + int numBlockBarriers; // Number of block barriers used (default to 1) +#ifdef __cplusplus + // This structure can be converted from a cudaFuncAttributes structure for + // users that use this header in their CUDA applications. + // + // If the application have access to the CUDA Runtime API, the application + // can obtain the function attributes of a CUDA kernel function through + // cudaFuncGetAttributes, and initialize a cudaOccFuncAttributes with the + // cudaFuncAttributes structure. + // + // Example: + /* + __global__ void foo() {...} + + ... + + { + cudaFuncAttributes attr; + + cudaFuncGetAttributes(&attr, foo); + + cudaOccFuncAttributes occAttr = attr; + + ... + + cudaOccMaxPotentialOccupancyBlockSize(..., &occAttr, ...); + } + */ + // + template + __OCC_INLINE + cudaOccFuncAttributes(const FuncAttributes &attr) + : maxThreadsPerBlock (attr.maxThreadsPerBlock), + numRegs (attr.numRegs), + sharedSizeBytes (attr.sharedSizeBytes), + partitionedGCConfig (PARTITIONED_GC_OFF), + shmemLimitConfig (FUNC_SHMEM_LIMIT_OPTIN), + maxDynamicSharedSizeBytes (attr.maxDynamicSharedSizeBytes), + numBlockBarriers (1) + {} + + __OCC_INLINE + cudaOccFuncAttributes() + : maxThreadsPerBlock (0), + numRegs (0), + sharedSizeBytes (0), + partitionedGCConfig (PARTITIONED_GC_OFF), + shmemLimitConfig (FUNC_SHMEM_LIMIT_DEFAULT), + maxDynamicSharedSizeBytes (0), + numBlockBarriers (0) + {} +#endif +}; + +typedef enum cudaOccCacheConfig_enum { + CACHE_PREFER_NONE = 0x00, // no preference for shared memory or L1 (default) + CACHE_PREFER_SHARED = 0x01, // prefer larger shared memory and smaller L1 cache + CACHE_PREFER_L1 = 0x02, // prefer larger L1 cache and smaller shared memory + CACHE_PREFER_EQUAL = 0x03 // prefer equal sized L1 cache and shared memory +} cudaOccCacheConfig; + +typedef enum cudaOccCarveoutConfig_enum { + SHAREDMEM_CARVEOUT_DEFAULT = -1, // no preference for shared memory or L1 (default) + SHAREDMEM_CARVEOUT_MAX_SHARED = 100, // prefer maximum available shared memory, minimum L1 cache + SHAREDMEM_CARVEOUT_MAX_L1 = 0, // prefer maximum available L1 cache, minimum shared memory + SHAREDMEM_CARVEOUT_HALF = 50 // prefer half of maximum available shared memory, with the rest as L1 cache +} cudaOccCarveoutConfig; + +/** + * Device state descriptor + * + * This structure describes device settings that affect occupancy calculation. + */ +struct cudaOccDeviceState +{ + // Cache / shared memory split preference. Deprecated on Volta + cudaOccCacheConfig cacheConfig; + // Shared memory / L1 split preference. Supported on only Volta + int carveoutConfig; + +#ifdef __cplusplus + __OCC_INLINE + cudaOccDeviceState() + : cacheConfig (CACHE_PREFER_NONE), + carveoutConfig (SHAREDMEM_CARVEOUT_DEFAULT) + {} +#endif +}; + +typedef enum cudaOccLimitingFactor_enum { + // Occupancy limited due to: + OCC_LIMIT_WARPS = 0x01, // - warps available + OCC_LIMIT_REGISTERS = 0x02, // - registers available + OCC_LIMIT_SHARED_MEMORY = 0x04, // - shared memory available + OCC_LIMIT_BLOCKS = 0x08, // - blocks available + OCC_LIMIT_BARRIERS = 0x10 // - barrier available +} cudaOccLimitingFactor; + +/** + * Occupancy output + * + * This structure contains occupancy calculator's output. + */ +struct cudaOccResult { + int activeBlocksPerMultiprocessor; // Occupancy + unsigned int limitingFactors; // Factors that limited occupancy. A bit + // field that counts the limiting + // factors, see cudaOccLimitingFactor + int blockLimitRegs; // Occupancy due to register + // usage, INT_MAX if the kernel does not + // use any register. 
+ int blockLimitSharedMem; // Occupancy due to shared memory + // usage, INT_MAX if the kernel does not + // use shared memory. + int blockLimitWarps; // Occupancy due to block size limit + int blockLimitBlocks; // Occupancy due to maximum number of blocks + // managable per SM + int blockLimitBarriers; // Occupancy due to block barrier usage + int allocatedRegistersPerBlock; // Actual number of registers allocated per + // block + size_t allocatedSharedMemPerBlock; // Actual size of shared memory allocated + // per block + cudaOccPartitionedGCConfig partitionedGCConfig; + // Report if partitioned global caching + // is actually enabled. +}; + +/** + * Partitioned global caching support + * + * See cudaOccPartitionedGlobalCachingModeSupport + */ +typedef enum cudaOccPartitionedGCSupport_enum { + PARTITIONED_GC_NOT_SUPPORTED, // Partitioned global caching is not supported + PARTITIONED_GC_SUPPORTED, // Partitioned global caching is supported +} cudaOccPartitionedGCSupport; + +/** + * Implementation + */ + +/** + * Max compute capability supported + */ +#define __CUDA_OCC_MAJOR__ 9 +#define __CUDA_OCC_MINOR__ 0 + +////////////////////////////////////////// +// Mathematical Helper Functions // +////////////////////////////////////////// + +static __OCC_INLINE int __occMin(int lhs, int rhs) +{ + return rhs < lhs ? rhs : lhs; +} + +static __OCC_INLINE int __occDivideRoundUp(int x, int y) +{ + return (x + (y - 1)) / y; +} + +static __OCC_INLINE int __occRoundUp(int x, int y) +{ + return y * __occDivideRoundUp(x, y); +} + +////////////////////////////////////////// +// Architectural Properties // +////////////////////////////////////////// + +/** + * Granularity of shared memory allocation + */ +static __OCC_INLINE cudaOccError cudaOccSMemAllocationGranularity(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 6: + case 7: + value = 256; + break; + case 8: + case 9: + value = 128; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Maximum number of registers per thread + */ +static __OCC_INLINE cudaOccError cudaOccRegAllocationMaxPerThread(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 6: + value = 255; + break; + case 7: + case 8: + case 9: + value = 256; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Granularity of register allocation + */ +static __OCC_INLINE cudaOccError cudaOccRegAllocationGranularity(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 6: + case 7: + case 8: + case 9: + value = 256; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Number of sub-partitions + */ +static __OCC_INLINE cudaOccError cudaOccSubPartitionsPerMultiprocessor(int *limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + case 5: + case 7: + case 8: + case 9: + value = 4; + break; + case 6: + value = properties->computeMinor ? 
4 : 2; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + + +/** + * Maximum number of blocks that can run simultaneously on a multiprocessor + */ +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerMultiprocessor(int* limit, const cudaOccDeviceProp *properties) +{ + int value; + + switch(properties->computeMajor) { + case 3: + value = 16; + break; + case 5: + case 6: + value = 32; + break; + case 7: { + int isTuring = properties->computeMinor == 5; + value = (isTuring) ? 16 : 32; + break; + } + case 8: + if (properties->computeMinor == 0) { + value = 32; + } + else if (properties->computeMinor == 9) { + value = 24; + } + else { + value = 16; + } + break; + case 9: + value = 32; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = value; + + return CUDA_OCC_SUCCESS; +} + +/** + * Align up shared memory based on compute major configurations + */ +static __OCC_INLINE cudaOccError cudaOccAlignUpShmemSizeVoltaPlus(size_t *shMemSize, const cudaOccDeviceProp *properties) +{ + // Volta and Turing have shared L1 cache / shared memory, and support cache + // configuration to trade one for the other. These values are needed to + // map carveout config ratio to the next available architecture size + size_t size = *shMemSize; + + switch (properties->computeMajor) { + case 7: { + // Turing supports 32KB and 64KB shared mem. + int isTuring = properties->computeMinor == 5; + if (isTuring) { + if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + // Volta supports 0KB, 8KB, 16KB, 32KB, 64KB, and 96KB shared mem. + else { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 96 * 1024) { + *shMemSize = 96 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + break; + } + case 8: + if (properties->computeMinor == 0 || properties->computeMinor == 7) { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 100 * 1024) { + *shMemSize = 100 * 1024; + } + else if (size <= 132 * 1024) { + *shMemSize = 132 * 1024; + } + else if (size <= 164 * 1024) { + *shMemSize = 164 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + else { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 100 * 1024) { + *shMemSize = 100 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + } + break; + case 9: { + if (size == 0) { + *shMemSize = 0; + } + else if (size <= 8 * 1024) { + *shMemSize = 8 * 1024; + } + else if (size <= 16 * 1024) { + *shMemSize = 16 * 1024; + } + else if (size <= 32 * 1024) { + *shMemSize = 32 * 1024; + } + else if (size <= 64 * 1024) { + *shMemSize = 64 * 1024; + } + else if (size <= 100 * 1024) { + *shMemSize = 100 
* 1024; + } + else if (size <= 132 * 1024) { + *shMemSize = 132 * 1024; + } + else if (size <= 164 * 1024) { + *shMemSize = 164 * 1024; + } + else if (size <= 196 * 1024) { + *shMemSize = 196 * 1024; + } + else if (size <= 228 * 1024) { + *shMemSize = 228 * 1024; + } + else { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + break; + } + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + return CUDA_OCC_SUCCESS; +} + +/** + * Shared memory based on the new carveoutConfig API introduced with Volta + */ +static __OCC_INLINE cudaOccError cudaOccSMemPreferenceVoltaPlus(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + size_t preferenceShmemSize; + + // CUDA 9.0 introduces a new API to set shared memory - L1 configuration on supported + // devices. This preference will take precedence over the older cacheConfig setting. + // Map cacheConfig to its effective preference value. + int effectivePreference = state->carveoutConfig; + if ((effectivePreference < SHAREDMEM_CARVEOUT_DEFAULT) || (effectivePreference > SHAREDMEM_CARVEOUT_MAX_SHARED)) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) { + switch (state->cacheConfig) + { + case CACHE_PREFER_L1: + effectivePreference = SHAREDMEM_CARVEOUT_MAX_L1; + break; + case CACHE_PREFER_SHARED: + effectivePreference = SHAREDMEM_CARVEOUT_MAX_SHARED; + break; + case CACHE_PREFER_EQUAL: + effectivePreference = SHAREDMEM_CARVEOUT_HALF; + break; + default: + effectivePreference = SHAREDMEM_CARVEOUT_DEFAULT; + break; + } + } + + if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) { + preferenceShmemSize = properties->sharedMemPerMultiprocessor; + } + else { + preferenceShmemSize = (size_t) (effectivePreference * properties->sharedMemPerMultiprocessor) / 100; + } + + status = cudaOccAlignUpShmemSizeVoltaPlus(&preferenceShmemSize, properties); + *limit = preferenceShmemSize; + return status; +} + +/** + * Shared memory based on the cacheConfig + */ +static __OCC_INLINE cudaOccError cudaOccSMemPreference(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state) +{ + size_t bytes = 0; + size_t sharedMemPerMultiprocessorHigh = properties->sharedMemPerMultiprocessor; + cudaOccCacheConfig cacheConfig = state->cacheConfig; + + // Kepler has shared L1 cache / shared memory, and support cache + // configuration to trade one for the other. These values are needed to + // calculate the correct shared memory size for user requested cache + // configuration. + // + size_t minCacheSize = 16384; + size_t maxCacheSize = 49152; + size_t cacheAndSharedTotal = sharedMemPerMultiprocessorHigh + minCacheSize; + size_t sharedMemPerMultiprocessorLow = cacheAndSharedTotal - maxCacheSize; + + switch (properties->computeMajor) { + case 3: + // Kepler supports 16KB, 32KB, or 48KB partitions for L1. The rest + // is shared memory. + // + switch (cacheConfig) { + default : + case CACHE_PREFER_NONE: + case CACHE_PREFER_SHARED: + bytes = sharedMemPerMultiprocessorHigh; + break; + case CACHE_PREFER_L1: + bytes = sharedMemPerMultiprocessorLow; + break; + case CACHE_PREFER_EQUAL: + // Equal is the mid-point between high and low. It should be + // equivalent to low + 16KB. + // + bytes = (sharedMemPerMultiprocessorHigh + sharedMemPerMultiprocessorLow) / 2; + break; + } + break; + case 5: + case 6: + // Maxwell and Pascal have dedicated shared memory. 
+ // + bytes = sharedMemPerMultiprocessorHigh; + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + *limit = bytes; + + return CUDA_OCC_SUCCESS; +} + +/** + * Shared memory based on config requested by User + */ +static __OCC_INLINE cudaOccError cudaOccSMemPerMultiprocessor(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state) +{ + // Volta introduces a new API that allows for shared memory carveout preference. Because it is a shared memory preference, + // it is handled separately from the cache config preference. + if (properties->computeMajor >= 7) { + return cudaOccSMemPreferenceVoltaPlus(limit, properties, state); + } + return cudaOccSMemPreference(limit, properties, state); +} + +/** + * Return the per block shared memory limit based on function config + */ +static __OCC_INLINE cudaOccError cudaOccSMemPerBlock(size_t *limit, const cudaOccDeviceProp *properties, cudaOccFuncShmemConfig shmemLimitConfig, size_t smemPerCta) +{ + switch (properties->computeMajor) { + case 2: + case 3: + case 4: + case 5: + case 6: + *limit = properties->sharedMemPerBlock; + break; + case 7: + case 8: + case 9: + switch (shmemLimitConfig) { + default: + case FUNC_SHMEM_LIMIT_DEFAULT: + *limit = properties->sharedMemPerBlock; + break; + case FUNC_SHMEM_LIMIT_OPTIN: + if (smemPerCta > properties->sharedMemPerBlock) { + *limit = properties->sharedMemPerBlockOptin; + } + else { + *limit = properties->sharedMemPerBlock; + } + break; + } + break; + default: + return CUDA_OCC_ERROR_UNKNOWN_DEVICE; + } + + // Starting Ampere, CUDA driver reserves additional shared memory per block + if (properties->computeMajor >= 8) { + *limit += properties->reservedSharedMemPerBlock; + } + + return CUDA_OCC_SUCCESS; +} + +/** + * Partitioned global caching mode support + */ +static __OCC_INLINE cudaOccError cudaOccPartitionedGlobalCachingModeSupport(cudaOccPartitionedGCSupport *limit, const cudaOccDeviceProp *properties) +{ + *limit = PARTITIONED_GC_NOT_SUPPORTED; + + if ((properties->computeMajor == 5 && (properties->computeMinor == 2 || properties->computeMinor == 3)) || + properties->computeMajor == 6) { + *limit = PARTITIONED_GC_SUPPORTED; + } + + if (properties->computeMajor == 6 && properties->computeMinor == 0) { + *limit = PARTITIONED_GC_NOT_SUPPORTED; + } + + return CUDA_OCC_SUCCESS; +} + +/////////////////////////////////////////////// +// User Input Sanity // +/////////////////////////////////////////////// + +static __OCC_INLINE cudaOccError cudaOccDevicePropCheck(const cudaOccDeviceProp *properties) +{ + // Verify device properties + // + // Each of these limits must be a positive number. + // + // Compute capacity is checked during the occupancy calculation + // + if (properties->maxThreadsPerBlock <= 0 || + properties->maxThreadsPerMultiprocessor <= 0 || + properties->regsPerBlock <= 0 || + properties->regsPerMultiprocessor <= 0 || + properties->warpSize <= 0 || + properties->sharedMemPerBlock <= 0 || + properties->sharedMemPerMultiprocessor <= 0 || + properties->numSms <= 0) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE cudaOccError cudaOccFuncAttributesCheck(const cudaOccFuncAttributes *attributes) +{ + // Verify function attributes + // + if (attributes->maxThreadsPerBlock <= 0 || + attributes->numRegs < 0) { // Compiler may choose not to use + // any register (empty kernels, + // etc.) 
+ return CUDA_OCC_ERROR_INVALID_INPUT; + } + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE cudaOccError cudaOccDeviceStateCheck(const cudaOccDeviceState *state) +{ + (void)state; // silence unused-variable warning + // Placeholder + // + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE cudaOccError cudaOccInputCheck( + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + + status = cudaOccDevicePropCheck(properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccFuncAttributesCheck(attributes); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccDeviceStateCheck(state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + return status; +} + +/////////////////////////////////////////////// +// Occupancy calculation Functions // +/////////////////////////////////////////////// + +static __OCC_INLINE cudaOccPartitionedGCConfig cudaOccPartitionedGCExpected( + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes) +{ + cudaOccPartitionedGCSupport gcSupport; + cudaOccPartitionedGCConfig gcConfig; + + cudaOccPartitionedGlobalCachingModeSupport(&gcSupport, properties); + + gcConfig = attributes->partitionedGCConfig; + + if (gcSupport == PARTITIONED_GC_NOT_SUPPORTED) { + gcConfig = PARTITIONED_GC_OFF; + } + + return gcConfig; +} + +// Warp limit +// +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMWarpsLimit( + int *limit, + cudaOccPartitionedGCConfig gcConfig, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + int blockSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int maxWarpsPerSm; + int warpsAllocatedPerCTA; + int maxBlocks; + (void)attributes; // silence unused-variable warning + + if (blockSize > properties->maxThreadsPerBlock) { + maxBlocks = 0; + } + else { + maxWarpsPerSm = properties->maxThreadsPerMultiprocessor / properties->warpSize; + warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize); + maxBlocks = 0; + + if (gcConfig != PARTITIONED_GC_OFF) { + int maxBlocksPerSmPartition; + int maxWarpsPerSmPartition; + + // If partitioned global caching is on, then a CTA can only use a SM + // partition (a half SM), and thus a half of the warp slots + // available per SM + // + maxWarpsPerSmPartition = maxWarpsPerSm / 2; + maxBlocksPerSmPartition = maxWarpsPerSmPartition / warpsAllocatedPerCTA; + maxBlocks = maxBlocksPerSmPartition * 2; + } + // On hardware that supports partitioned global caching, each half SM is + // guaranteed to support at least 32 warps (maximum number of warps of a + // CTA), so caching will not cause 0 occupancy due to insufficient warp + // allocation slots. 
+ // + else { + maxBlocks = maxWarpsPerSm / warpsAllocatedPerCTA; + } + } + + *limit = maxBlocks; + + return status; +} + +// Shared memory limit +// +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMSmemLimit( + int *limit, + cudaOccResult *result, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int blockSize, + size_t dynamicSmemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int allocationGranularity; + size_t userSmemPreference = 0; + size_t totalSmemUsagePerCTA; + size_t maxSmemUsagePerCTA; + size_t smemAllocatedPerCTA; + size_t staticSmemSize; + size_t sharedMemPerMultiprocessor; + size_t smemLimitPerCTA; + int maxBlocks; + int dynamicSmemSizeExceeded = 0; + int totalSmemSizeExceeded = 0; + (void)blockSize; // silence unused-variable warning + + status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Obtain the user preferred shared memory size. This setting is ignored if + // user requests more shared memory than preferred. + // + status = cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + staticSmemSize = attributes->sharedSizeBytes + properties->reservedSharedMemPerBlock; + totalSmemUsagePerCTA = staticSmemSize + dynamicSmemSize; + smemAllocatedPerCTA = __occRoundUp((int)totalSmemUsagePerCTA, (int)allocationGranularity); + + maxSmemUsagePerCTA = staticSmemSize + attributes->maxDynamicSharedSizeBytes; + + dynamicSmemSizeExceeded = 0; + totalSmemSizeExceeded = 0; + + // Obtain the user set maximum dynamic size if it exists + // If so, the current launch dynamic shared memory must not + // exceed the set limit + if (attributes->shmemLimitConfig != FUNC_SHMEM_LIMIT_DEFAULT && + dynamicSmemSize > attributes->maxDynamicSharedSizeBytes) { + dynamicSmemSizeExceeded = 1; + } + + status = cudaOccSMemPerBlock(&smemLimitPerCTA, properties, attributes->shmemLimitConfig, maxSmemUsagePerCTA); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + if (smemAllocatedPerCTA > smemLimitPerCTA) { + totalSmemSizeExceeded = 1; + } + + if (dynamicSmemSizeExceeded || totalSmemSizeExceeded) { + maxBlocks = 0; + } + else { + // User requested shared memory limit is used as long as it is greater + // than the total shared memory used per CTA, i.e. as long as at least + // one CTA can be launched. + if (userSmemPreference >= smemAllocatedPerCTA) { + sharedMemPerMultiprocessor = userSmemPreference; + } + else { + // On Volta+, user requested shared memory will limit occupancy + // if it's less than shared memory per CTA. Otherwise, the + // maximum shared memory limit is used. 
+ if (properties->computeMajor >= 7) { + sharedMemPerMultiprocessor = smemAllocatedPerCTA; + status = cudaOccAlignUpShmemSizeVoltaPlus(&sharedMemPerMultiprocessor, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + } + else { + sharedMemPerMultiprocessor = properties->sharedMemPerMultiprocessor; + } + } + + if (smemAllocatedPerCTA > 0) { + maxBlocks = (int)(sharedMemPerMultiprocessor / smemAllocatedPerCTA); + } + else { + maxBlocks = INT_MAX; + } + } + + result->allocatedSharedMemPerBlock = smemAllocatedPerCTA; + + *limit = maxBlocks; + + return status; +} + +static __OCC_INLINE +cudaOccError cudaOccMaxBlocksPerSMRegsLimit( + int *limit, + cudaOccPartitionedGCConfig *gcConfig, + cudaOccResult *result, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + int blockSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int allocationGranularity; + int warpsAllocatedPerCTA; + int regsAllocatedPerCTA; + int regsAssumedPerCTA; + int regsPerWarp; + int regsAllocatedPerWarp; + int numSubPartitions; + int numRegsPerSubPartition; + int numWarpsPerSubPartition; + int numWarpsPerSM; + int maxBlocks; + int maxRegsPerThread; + + status = cudaOccRegAllocationGranularity( + &allocationGranularity, + properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccRegAllocationMaxPerThread( + &maxRegsPerThread, + properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + status = cudaOccSubPartitionsPerMultiprocessor(&numSubPartitions, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize); + + // GPUs of compute capability 2.x and higher allocate registers to warps + // + // Number of regs per warp is regs per thread x warp size, rounded up to + // register allocation granularity + // + regsPerWarp = attributes->numRegs * properties->warpSize; + regsAllocatedPerWarp = __occRoundUp(regsPerWarp, allocationGranularity); + regsAllocatedPerCTA = regsAllocatedPerWarp * warpsAllocatedPerCTA; + + // Hardware verifies if a launch fits the per-CTA register limit. For + // historical reasons, the verification logic assumes register + // allocations are made to all partitions simultaneously. Therefore, to + // simulate the hardware check, the warp allocation needs to be rounded + // up to the number of partitions. + // + regsAssumedPerCTA = regsAllocatedPerWarp * __occRoundUp(warpsAllocatedPerCTA, numSubPartitions); + + if (properties->regsPerBlock < regsAssumedPerCTA || // Hardware check + properties->regsPerBlock < regsAllocatedPerCTA || // Software check + attributes->numRegs > maxRegsPerThread) { // Per thread limit check + maxBlocks = 0; + } + else { + if (regsAllocatedPerWarp > 0) { + // Registers are allocated in each sub-partition. The max number + // of warps that can fit on an SM is equal to the max number of + // warps per sub-partition x number of sub-partitions. 
+ // + numRegsPerSubPartition = properties->regsPerMultiprocessor / numSubPartitions; + numWarpsPerSubPartition = numRegsPerSubPartition / regsAllocatedPerWarp; + + maxBlocks = 0; + + if (*gcConfig != PARTITIONED_GC_OFF) { + int numSubPartitionsPerSmPartition; + int numWarpsPerSmPartition; + int maxBlocksPerSmPartition; + + // If partitioned global caching is on, then a CTA can only + // use a half SM, and thus a half of the registers available + // per SM + // + numSubPartitionsPerSmPartition = numSubPartitions / 2; + numWarpsPerSmPartition = numWarpsPerSubPartition * numSubPartitionsPerSmPartition; + maxBlocksPerSmPartition = numWarpsPerSmPartition / warpsAllocatedPerCTA; + maxBlocks = maxBlocksPerSmPartition * 2; + } + + // Try again if partitioned global caching is not enabled, or if + // the CTA cannot fit on the SM with caching on (maxBlocks == 0). In the latter + // case, the device will automatically turn off caching, except + // if the user forces enablement via PARTITIONED_GC_ON_STRICT to calculate + // occupancy and launch configuration. + // + if (maxBlocks == 0 && *gcConfig != PARTITIONED_GC_ON_STRICT) { + // In case *gcConfig was PARTITIONED_GC_ON flip it OFF since + // this is what it will be if we spread CTA across partitions. + // + *gcConfig = PARTITIONED_GC_OFF; + numWarpsPerSM = numWarpsPerSubPartition * numSubPartitions; + maxBlocks = numWarpsPerSM / warpsAllocatedPerCTA; + } + } + else { + maxBlocks = INT_MAX; + } + } + + + result->allocatedRegistersPerBlock = regsAllocatedPerCTA; + + *limit = maxBlocks; + + return status; +} + +// Barrier limit +// +static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMBlockBarrierLimit( + int *limit, + int ctaLimitBlocks, + const cudaOccFuncAttributes *attributes) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int numBarriersAvailable = ctaLimitBlocks * 2; + int numBarriersUsed = attributes->numBlockBarriers; + int maxBlocks = INT_MAX; + + if (numBarriersUsed) { + maxBlocks = numBarriersAvailable / numBarriersUsed; + } + + *limit = maxBlocks; + + return status; +} + +/////////////////////////////////// +// API Implementations // +/////////////////////////////////// + +static __OCC_INLINE +cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor( + cudaOccResult *result, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int blockSize, + size_t dynamicSmemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + int ctaLimitWarps = 0; + int ctaLimitBlocks = 0; + int ctaLimitSMem = 0; + int ctaLimitRegs = 0; + int ctaLimitBars = 0; + int ctaLimit = 0; + unsigned int limitingFactors = 0; + + cudaOccPartitionedGCConfig gcConfig = PARTITIONED_GC_OFF; + + if (!result || !properties || !attributes || !state || blockSize <= 0) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + /////////////////////////// + // Check user input + /////////////////////////// + + status = cudaOccInputCheck(properties, attributes, state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + /////////////////////////// + // Initialization + /////////////////////////// + + gcConfig = cudaOccPartitionedGCExpected(properties, attributes); + + /////////////////////////// + // Compute occupancy + /////////////////////////// + + // Limits due to registers/SM + // Also compute if partitioned global caching has to be turned off + // + status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegs, &gcConfig, result, properties, attributes, blockSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // 
SMs on GP100 (6.0) have 2 subpartitions, while those on GP10x have 4. + // As a result, an SM on GP100 may be able to run more CTAs than the one on GP10x. + // For forward compatibility within Pascal family, if a function cannot run on GP10x (maxBlock == 0), + // we do not let it run on any Pascal processor, even though it may be able to run on GP100. + // Therefore, we check the occupancy on GP10x when it can run on GP100 + // + if (properties->computeMajor == 6 && properties->computeMinor == 0 && ctaLimitRegs) { + cudaOccDeviceProp propertiesGP10x; + cudaOccPartitionedGCConfig gcConfigGP10x = gcConfig; + int ctaLimitRegsGP10x = 0; + + // Set up properties for GP10x + memcpy(&propertiesGP10x, properties, sizeof(propertiesGP10x)); + propertiesGP10x.computeMinor = 1; + + status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegsGP10x, &gcConfigGP10x, result, &propertiesGP10x, attributes, blockSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + if (ctaLimitRegsGP10x == 0) { + ctaLimitRegs = 0; + } + } + + // Limits due to warps/SM + // + status = cudaOccMaxBlocksPerSMWarpsLimit(&ctaLimitWarps, gcConfig, properties, attributes, blockSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Limits due to blocks/SM + // + status = cudaOccMaxBlocksPerMultiprocessor(&ctaLimitBlocks, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Limits due to shared memory/SM + // + status = cudaOccMaxBlocksPerSMSmemLimit(&ctaLimitSMem, result, properties, attributes, state, blockSize, dynamicSmemSize); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + /////////////////////////// + // Overall occupancy + /////////////////////////// + + // Overall limit is min() of limits due to above reasons + // + ctaLimit = __occMin(ctaLimitRegs, __occMin(ctaLimitSMem, __occMin(ctaLimitWarps, ctaLimitBlocks))); + + // Determine occupancy limiting factors + // + if (ctaLimit == ctaLimitWarps) { + limitingFactors |= OCC_LIMIT_WARPS; + } + if (ctaLimit == ctaLimitRegs) { + limitingFactors |= OCC_LIMIT_REGISTERS; + } + if (ctaLimit == ctaLimitSMem) { + limitingFactors |= OCC_LIMIT_SHARED_MEMORY; + } + if (ctaLimit == ctaLimitBlocks) { + limitingFactors |= OCC_LIMIT_BLOCKS; + } + + // For Hopper onwards compute the limits to occupancy based on block barrier count + // + if (properties->computeMajor >= 9 && attributes->numBlockBarriers > 0) { + // Limits due to barrier/SM + // + status = cudaOccMaxBlocksPerSMBlockBarrierLimit(&ctaLimitBars, ctaLimitBlocks, attributes); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Recompute overall limit based on barrier/SM + // + ctaLimit = __occMin(ctaLimitBars, ctaLimit); + + // Determine if this is occupancy limiting factor + // + if (ctaLimit == ctaLimitBars) { + limitingFactors |= OCC_LIMIT_BARRIERS; + } + } + else { + ctaLimitBars = INT_MAX; + } + + // Fill in the return values + // + result->limitingFactors = limitingFactors; + + result->blockLimitRegs = ctaLimitRegs; + result->blockLimitSharedMem = ctaLimitSMem; + result->blockLimitWarps = ctaLimitWarps; + result->blockLimitBlocks = ctaLimitBlocks; + result->blockLimitBarriers = ctaLimitBars; + result->partitionedGCConfig = gcConfig; + + // Final occupancy + result->activeBlocksPerMultiprocessor = ctaLimit; + + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE +cudaOccError cudaOccAvailableDynamicSMemPerBlock( + size_t *bytesAvailable, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + int 
numBlocks, + int blockSize) +{ + int allocationGranularity; + size_t smemLimitPerBlock; + size_t smemAvailableForDynamic; + size_t userSmemPreference = 0; + size_t sharedMemPerMultiprocessor; + cudaOccResult result; + cudaOccError status = CUDA_OCC_SUCCESS; + + if (numBlocks <= 0) + return CUDA_OCC_ERROR_INVALID_INPUT; + + // First compute occupancy of potential kernel launch. + // + status = cudaOccMaxActiveBlocksPerMultiprocessor(&result, properties, attributes, state, blockSize, 0); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + // Check if occupancy is achievable given user requested number of blocks. + // + if (result.activeBlocksPerMultiprocessor < numBlocks) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // Return the per block shared memory limit based on function config. + // + status = cudaOccSMemPerBlock(&smemLimitPerBlock, properties, attributes->shmemLimitConfig, properties->sharedMemPerMultiprocessor); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + // If there is only a single block needed per SM, then the user preference can be ignored and the fully SW + // limit is allowed to be used as shared memory otherwise if more than one block is needed, then the user + // preference sets the total limit of available shared memory. + // + cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state); + if (numBlocks == 1) { + sharedMemPerMultiprocessor = smemLimitPerBlock; + } + else { + if (!userSmemPreference) { + userSmemPreference = 1 ; + status = cudaOccAlignUpShmemSizeVoltaPlus(&userSmemPreference, properties); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + } + sharedMemPerMultiprocessor = userSmemPreference; + } + + // Compute total shared memory available per SM + // + smemAvailableForDynamic = sharedMemPerMultiprocessor / numBlocks; + smemAvailableForDynamic = (smemAvailableForDynamic / allocationGranularity) * allocationGranularity; + + // Cap shared memory + // + if (smemAvailableForDynamic > smemLimitPerBlock) { + smemAvailableForDynamic = smemLimitPerBlock; + } + + // Now compute dynamic shared memory size + smemAvailableForDynamic = smemAvailableForDynamic - attributes->sharedSizeBytes; + + // Cap computed dynamic SM by user requested limit specified via cuFuncSetAttribute() + // + if (smemAvailableForDynamic > attributes->maxDynamicSharedSizeBytes) + smemAvailableForDynamic = attributes->maxDynamicSharedSizeBytes; + + *bytesAvailable = smemAvailableForDynamic; + return CUDA_OCC_SUCCESS; +} + +static __OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, + int *blockSize, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + size_t (*blockSizeToDynamicSMemSize)(int), + size_t dynamicSMemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + cudaOccResult result; + + // Limits + int occupancyLimit; + int granularity; + int blockSizeLimit; + + // Recorded maximum + int maxBlockSize = 0; + int numBlocks = 0; + int maxOccupancy = 0; + + // Temporary + int blockSizeToTryAligned; + int blockSizeToTry; + int blockSizeLimitAligned; + int occupancyInBlocks; + int occupancyInThreads; + + /////////////////////////// + // Check user input + /////////////////////////// + + if (!minGridSize || !blockSize || !properties || !attributes || !state) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + status = 
cudaOccInputCheck(properties, attributes, state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + ///////////////////////////////////////////////////////////////////////////////// + // Try each block size, and pick the block size with maximum occupancy + ///////////////////////////////////////////////////////////////////////////////// + + occupancyLimit = properties->maxThreadsPerMultiprocessor; + granularity = properties->warpSize; + + blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock); + blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity); + + for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) { + blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned); + + // Ignore dynamicSMemSize if the user provides a mapping + // + if (blockSizeToDynamicSMemSize) { + dynamicSMemSize = (*blockSizeToDynamicSMemSize)(blockSizeToTry); + } + + status = cudaOccMaxActiveBlocksPerMultiprocessor( + &result, + properties, + attributes, + state, + blockSizeToTry, + dynamicSMemSize); + + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + occupancyInBlocks = result.activeBlocksPerMultiprocessor; + occupancyInThreads = blockSizeToTry * occupancyInBlocks; + + if (occupancyInThreads > maxOccupancy) { + maxBlockSize = blockSizeToTry; + numBlocks = occupancyInBlocks; + maxOccupancy = occupancyInThreads; + } + + // Early out if we have reached the maximum + // + if (occupancyLimit == maxOccupancy) { + break; + } + } + + /////////////////////////// + // Return best available + /////////////////////////// + + // Suggested min grid size to achieve a full machine launch + // + *minGridSize = numBlocks * properties->numSms; + *blockSize = maxBlockSize; + + return status; +} + + +#if defined(__cplusplus) + +namespace { + +__OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSize( + int *minGridSize, + int *blockSize, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + size_t dynamicSMemSize) +{ + return cudaOccMaxPotentialOccupancyBlockSize( + minGridSize, + blockSize, + properties, + attributes, + state, + NULL, + dynamicSMemSize); +} + +template +__OCC_INLINE +cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem( + int *minGridSize, + int *blockSize, + const cudaOccDeviceProp *properties, + const cudaOccFuncAttributes *attributes, + const cudaOccDeviceState *state, + UnaryFunction blockSizeToDynamicSMemSize) +{ + cudaOccError status = CUDA_OCC_SUCCESS; + cudaOccResult result; + + // Limits + int occupancyLimit; + int granularity; + int blockSizeLimit; + + // Recorded maximum + int maxBlockSize = 0; + int numBlocks = 0; + int maxOccupancy = 0; + + // Temporary + int blockSizeToTryAligned; + int blockSizeToTry; + int blockSizeLimitAligned; + int occupancyInBlocks; + int occupancyInThreads; + size_t dynamicSMemSize; + + /////////////////////////// + // Check user input + /////////////////////////// + + if (!minGridSize || !blockSize || !properties || !attributes || !state) { + return CUDA_OCC_ERROR_INVALID_INPUT; + } + + status = cudaOccInputCheck(properties, attributes, state); + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + ///////////////////////////////////////////////////////////////////////////////// + // Try each block size, and pick the block size with maximum occupancy + ///////////////////////////////////////////////////////////////////////////////// + + occupancyLimit = 
properties->maxThreadsPerMultiprocessor; + granularity = properties->warpSize; + blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock); + blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity); + + for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) { + blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned); + + dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry); + + status = cudaOccMaxActiveBlocksPerMultiprocessor( + &result, + properties, + attributes, + state, + blockSizeToTry, + dynamicSMemSize); + + if (status != CUDA_OCC_SUCCESS) { + return status; + } + + occupancyInBlocks = result.activeBlocksPerMultiprocessor; + + occupancyInThreads = blockSizeToTry * occupancyInBlocks; + + if (occupancyInThreads > maxOccupancy) { + maxBlockSize = blockSizeToTry; + numBlocks = occupancyInBlocks; + maxOccupancy = occupancyInThreads; + } + + // Early out if we have reached the maximum + // + if (occupancyLimit == maxOccupancy) { + break; + } + } + + /////////////////////////// + // Return best available + /////////////////////////// + + // Suggested min grid size to achieve a full machine launch + // + *minGridSize = numBlocks * properties->numSms; + *blockSize = maxBlockSize; + + return status; +} + +} // namespace anonymous + +#endif /*__cplusplus */ + +#undef __OCC_INLINE + +#endif /*__cuda_occupancy_h__*/ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h new file mode 100644 index 0000000000000000000000000000000000000000..46bc89e4499576f1ae58848cd8684ba3e32420cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h @@ -0,0 +1,224 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
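
As a rough host-side usage sketch (an editor's illustration, not part of the vendored header), the wrapper below shows one way the cudaOccMaxPotentialOccupancyBlockSize entry point defined above might be driven once the occupancy structs have been filled in for a particular device and kernel. The wrapper name pickLaunchConfig and its error mapping are placeholders; passing NULL for the block-size-to-shared-memory callback mirrors the C++ convenience overload above and treats dynSmemPerBlock as a fixed per-block amount.

#include "cuda_occupancy.h"

/* Suggest a launch configuration for a kernel whose resource usage is already
 * described in attrs; props and state must be populated by the caller. */
static int pickLaunchConfig(const cudaOccDeviceProp     *props,
                            const cudaOccFuncAttributes *attrs,
                            const cudaOccDeviceState    *state,
                            size_t                       dynSmemPerBlock,
                            int                         *minGridSize,
                            int                         *blockSize)
{
    cudaOccError err = cudaOccMaxPotentialOccupancyBlockSize(
        minGridSize, blockSize, props, attrs, state, NULL, dynSmemPerBlock);

    return (err == CUDA_OCC_SUCCESS) ? 0 : -1;
}
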
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDA_PIPELINE_H_ +# define _CUDA_PIPELINE_H_ + +# include "cuda_pipeline_primitives.h" + +# if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER) +# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \ + -std=c++11 compiler option. +# endif + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# include "cuda_awbarrier.h" +# endif + +// Integration with libcu++'s cuda::barrier. + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# if defined(_LIBCUDACXX_CUDA_ABI_VERSION) +# define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION +# else +# define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4 +# endif + +# define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y +# define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y) +# define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION) + +namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE { + struct __block_scope_barrier_base; +}} + +# endif + +_CUDA_PIPELINE_BEGIN_NAMESPACE + +template +_CUDA_PIPELINE_QUALIFIER +auto segment(T* ptr) -> T(*)[N]; + +class pipeline { +public: + pipeline(const pipeline&) = delete; + pipeline(pipeline&&) = delete; + pipeline& operator=(const pipeline&) = delete; + pipeline& operator=(pipeline&&) = delete; + + _CUDA_PIPELINE_QUALIFIER pipeline(); + _CUDA_PIPELINE_QUALIFIER size_t commit(); + _CUDA_PIPELINE_QUALIFIER void commit_and_wait(); + _CUDA_PIPELINE_QUALIFIER void wait(size_t batch); + template + _CUDA_PIPELINE_QUALIFIER void wait_prior(); + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) + _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier); + _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier); +# endif + +private: + size_t current_batch; +}; + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T& dst, const T& src, pipeline& pipe); + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe); + +template +_CUDA_PIPELINE_QUALIFIER +auto segment(T* ptr) -> T(*)[N] +{ + return (T(*)[N])ptr; +} + +_CUDA_PIPELINE_QUALIFIER +pipeline::pipeline() + : current_batch(0) +{ +} + +_CUDA_PIPELINE_QUALIFIER +size_t 
pipeline::commit() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit(); + return this->current_batch++; +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline::commit_and_wait() +{ + (void)pipeline::commit(); + pipeline::wait_prior<0>(); +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline::wait(size_t batch) +{ + const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0; + + switch (prior) { + case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break; + case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break; + case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break; + case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break; + case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break; + case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break; + case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break; + case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break; + default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break; + } +} + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline::wait_prior() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior(); +} + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +_CUDA_PIPELINE_QUALIFIER +void pipeline::arrive_on(awbarrier& barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier); +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast(&barrier)); +} +# endif + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T& dst, const T& src, pipeline& pipe) +{ + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(&src) & (alignof(T) - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(&dst) & (alignof(T) - 1))); + + if (__is_trivially_copyable(T)) { + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed( + reinterpret_cast(&dst), reinterpret_cast(&src)); + } else { + dst = src; + } +} + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe) +{ + constexpr size_t dst_size = sizeof(*dst); + constexpr size_t src_size = sizeof(*src); + static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size."); + static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size."); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (dst_size - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (dst_size - 1))); + + if (__is_trivially_copyable(T)) { + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict( + reinterpret_cast(*dst), reinterpret_cast(*src)); + } else { + for (size_t i = 0; i < DstN; ++i) { + (*dst)[i] = (i < SrcN) ? (*src)[i] : T(); + } + } +} + +_CUDA_PIPELINE_END_NAMESPACE + +#endif /* !_CUDA_PIPELINE_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..01882b6b976347b9bfdf276c3d0adcec1d8a55fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h @@ -0,0 +1,373 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. 
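
As a rough device-side sketch (an editor's illustration, not part of the vendored header), the kernel below double-buffers tiles through shared memory with the nvcuda::experimental::pipeline interface defined above. The kernel name sum_tiles, the 128-thread tile width, the single-block launch, and the assumption that ntiles >= 1 are hypothetical choices made only for this example.

#include "cuda_pipeline.h"

using namespace nvcuda::experimental;

/* Launched with a single 1-D block of 128 threads; gmem holds ntiles * 128 elements. */
__global__ void sum_tiles(const int4* __restrict__ gmem, int ntiles, int* __restrict__ out)
{
    const int TILE = 128;
    __shared__ int4 smem[2][128];
    pipeline pipe;
    int acc = 0;

    // Prime the first stage of the double buffer.
    memcpy_async(smem[0][threadIdx.x], gmem[threadIdx.x], pipe);
    pipe.commit();

    for (int t = 1; t < ntiles; ++t) {
        // Issue the next tile while the previous copy may still be in flight.
        memcpy_async(smem[t & 1][threadIdx.x], gmem[t * TILE + threadIdx.x], pipe);
        pipe.commit();

        pipe.wait_prior<1>();   // every batch except the newest one has landed
        __syncthreads();
        acc += smem[(t - 1) & 1][threadIdx.x].x;
        __syncthreads();
    }

    pipe.commit_and_wait();     // drain the final tile
    __syncthreads();
    acc += smem[(ntiles - 1) & 1][threadIdx.x].x;

    out[threadIdx.x] = acc;
}
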
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#ifndef _CUDA_PIPELINE_HELPERS_H_ +# define _CUDA_PIPELINE_HELPERS_H_ + +# define _CUDA_PIPELINE_NAMESPACE nvcuda::experimental +# define _CUDA_PIPELINE_BEGIN_NAMESPACE namespace nvcuda { namespace experimental { +# define _CUDA_PIPELINE_END_NAMESPACE } } + +# define _CUDA_PIPELINE_INTERNAL_NAMESPACE _CUDA_PIPELINE_NAMESPACE::__pipeline_internal +# define _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE _CUDA_PIPELINE_BEGIN_NAMESPACE namespace __pipeline_internal { +# define _CUDA_PIPELINE_END_INTERNAL_NAMESPACE } _CUDA_PIPELINE_END_NAMESPACE + +# if !defined(_CUDA_PIPELINE_QUALIFIER) +# define _CUDA_PIPELINE_QUALIFIER inline __device__ +# endif +# if !defined(_CUDA_PIPELINE_STATIC_QUALIFIER) +# define _CUDA_PIPELINE_STATIC_QUALIFIER static inline __device__ +# endif + +# if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +# define _CUDA_PIPELINE_ARCH_700_OR_LATER +# endif + +# if (__CUDA_ARCH__ >= 800) +# define _CUDA_PIPELINE_HAS_ASYNC_COPY 1 +# else +# define _CUDA_PIPELINE_HAS_ASYNC_COPY 0 +# endif + +# if !defined(_CUDA_PIPELINE_MAX_STAGES) +# define _CUDA_PIPELINE_MAX_STAGES 8 +# endif + +# if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900))) +# define _CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER +# endif + +# if !defined(_CUDA_PIPELINE_DEBUG) +# if defined(__CUDACC_DEBUG__) +# define _CUDA_PIPELINE_DEBUG 1 +# else +# define _CUDA_PIPELINE_DEBUG 0 +# endif +# endif + +# if defined(_CUDA_PIPELINE_DEBUG) && (_CUDA_PIPELINE_DEBUG == 1) && !defined(NDEBUG) +# if !defined(__CUDACC_RTC__) +# include +# endif +# define _CUDA_PIPELINE_ASSERT(x) assert((x)); +# define _CUDA_PIPELINE_ABORT() assert(0); +# else +# define _CUDA_PIPELINE_ASSERT(x) +# define _CUDA_PIPELINE_ABORT() __trap(); +# endif + +# if defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER) +# define _CUDA_PIPELINE_STATIC_ASSERT(c, m) static_assert(c, m) +# else +# define _CUDA_PIPELINE_STATIC_ASSERT(c, m) +# endif + +# if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__) +# define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "r" +# else +# define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "l" +# endif + +# if defined(__CUDACC_RTC__) +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef uint64_t uintptr_t; +# else +# include +# endif + +_CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE + +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(short) == 2, "Size mismatch for type 'short'"); +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int) == 4, "Size mismatch for type 'int'"); +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int2) == 8, "Size mismatch for type 'int2'"); +_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int4) == 16, "Size mismatch for type 'int4'"); + +extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *); + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline_memcpy_sync(void* __restrict__ dst, const void* __restrict__ src) +{ + _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size."); + _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size"); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (CopySize - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (CopySize - 1))); + + char* const d = reinterpret_cast(dst); + const char* const s = reinterpret_cast(src); + + size_t copy_step_size; + if (SourceSize == 0) { + copy_step_size = CopySize; + } else if (SourceSize == 2 || SourceSize == 4 || SourceSize == 8 || SourceSize == 16) { + copy_step_size = SourceSize; + } else { + copy_step_size = 1; + 
    }
+
+    for (size_t i = 0; i < CopySize; i += copy_step_size) {
+        const bool copy_source = SourceSize && (i < SourceSize);
+
+        switch (copy_step_size) {
+        case 1:
+            d[i] = copy_source ? s[i] : char();
+            break;
+        case 2:
+            *reinterpret_cast<short*>(d + i) = copy_source ? *reinterpret_cast<const short*>(s + i) : short();
+            break;
+        case 4:
+            *reinterpret_cast<int*>(d + i) = copy_source ? *reinterpret_cast<const int*>(s + i) : int();
+            break;
+        case 8:
+            *reinterpret_cast<int2*>(d + i) = copy_source ? *reinterpret_cast<const int2*>(s + i) : int2();
+            break;
+        case 16:
+            *reinterpret_cast<int4*>(d + i) = copy_source ? *reinterpret_cast<const int4*>(s + i) : int4();
+            break;
+        }
+    }
+}
+
+template<bool>
+struct ImplementationChooser;
+
+template<>
+struct ImplementationChooser<true> {
+    template<size_t CopySize, size_t SourceSize>
+    struct CpAsyncChooser {
+        _CUDA_PIPELINE_STATIC_QUALIFIER
+        void cp_async(void* __restrict__ dst, const void* __restrict__ src)
+        {
+            asm volatile ("cp.async.ca.shared.global [%0], [%1], %2, %3;"
+                :
+                : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(CopySize),
+                  "n"(SourceSize)
+                : "memory");
+        }
+    };
+
+    template<size_t SourceSize>
+    struct CpAsyncChooser<16, SourceSize> {
+        _CUDA_PIPELINE_STATIC_QUALIFIER
+        void cp_async(void* __restrict__ dst, const void* __restrict__ src)
+        {
+            asm volatile ("cp.async.cg.shared.global [%0], [%1], %2, %3;"
+                :
+                : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(16), "n"(SourceSize)
+                : "memory");
+        }
+    };
+
+    template<size_t CopySize, size_t SourceSize>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
+    {
+        _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+        _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+        _CUDA_PIPELINE_ASSERT(__isShared(dst));
+        _CUDA_PIPELINE_ASSERT(__isGlobal(src));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+        CpAsyncChooser<CopySize, SourceSize>::cp_async(dst, src);
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_commit()
+    {
+        asm volatile ("cp.async.commit_group;");
+    }
+
+    template<unsigned N>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_wait_prior()
+    {
+        asm volatile ("cp.async.wait_group %0;"
+            :
+            : "n"(N < _CUDA_PIPELINE_MAX_STAGES ? N : _CUDA_PIPELINE_MAX_STAGES));
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_arrive_on(uint64_t* barrier)
+    {
+        _CUDA_PIPELINE_ASSERT(__isShared(barrier));
+
+        asm volatile ("cp.async.mbarrier.arrive.shared.b64 [%0];"
+            :
+            : "r"(__nvvm_get_smem_pointer(barrier)));
+    }
+};
+
+template<>
+struct ImplementationChooser<false> {
+    template<size_t CopySize, size_t SourceSize>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
+    {
+        _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+        _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+        _CUDA_PIPELINE_ASSERT(__isShared(dst));
+        _CUDA_PIPELINE_ASSERT(__isGlobal(src));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+        pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_commit()
+    {
+    }
+
+    template<unsigned N>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_wait_prior()
+    {
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_arrive_on(uint64_t* barrier)
+    {
+    }
+};
+
+template<size_t CopySize, size_t SourceSize>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+    _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+    _CUDA_PIPELINE_ASSERT(__isShared(dst));
+    _CUDA_PIPELINE_ASSERT(__isGlobal(src));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_commit()
+{
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_commit();
+}
+
+template<unsigned N>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_wait_prior()
+{
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_wait_prior<N>();
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_arrive_on(uint64_t* barrier)
+{
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_arrive_on(barrier);
+}
+
+template<size_t CopySize, size_t SourceSize>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_copy_strict(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+    _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size.");
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+
+    if (__isGlobal(src) && __isShared(dst)) {
+        pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
+    } else {
+        pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
+    }
+}
+
+template<size_t CopySize, size_t Align>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_copy_relaxed(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (Align - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (Align - 1)));
+
+    const char* s = reinterpret_cast<const char*>(src);
+    char* d = reinterpret_cast<char*>(dst);
+    size_t remaining = CopySize;
+
+    while (remaining) {
+        if ((Align >= 16) && (remaining >= 16)) {
+            pipeline_copy_strict<16, 16>(dst, src);
+            d += 16;
+            s += 16;
+            remaining -= 16;
+        } else if ((Align >= 8) && (remaining >= 8)) {
+            pipeline_copy_strict<8, 8>(dst, src);
+            d += 8;
+            s += 8;
+            remaining -= 8;
+        } else if 
((Align >= 4) && (remaining >= 4)) { + pipeline_copy_strict<4, 4>(dst, src); + d += 4; + s += 4; + remaining -= 4; + } else if ((Align >= 2) && (remaining >= 2)) { + *reinterpret_cast(d) = *reinterpret_cast(s); + d += 2; + s += 2; + remaining -= 2; + } else { + *d = *s; + d += 1; + s += 1; + remaining -= 1; + } + } +} + +_CUDA_PIPELINE_END_INTERNAL_NAMESPACE + +#endif /* !_CUDA_PIPELINE_HELPERS_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h new file mode 100644 index 0000000000000000000000000000000000000000..eaba0cfb5ac9184bec5e837d2ec2f9db11d873ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h @@ -0,0 +1,148 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#ifndef _CUDA_PIPELINE_PRIMITIVES_H_ +# define _CUDA_PIPELINE_PRIMITIVES_H_ + +# include "cuda_pipeline_helpers.h" + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_memcpy_async(void* __restrict__ dst_shared, const void* __restrict__ src_global, size_t size_and_align, + size_t zfill = 0) +{ + _CUDA_PIPELINE_ASSERT(size_and_align == 4 || size_and_align == 8 || size_and_align == 16); + _CUDA_PIPELINE_ASSERT(zfill <= size_and_align); + _CUDA_PIPELINE_ASSERT(__isShared(dst_shared)); + _CUDA_PIPELINE_ASSERT(__isGlobal(src_global)); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst_shared) & (size_and_align - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src_global) & (size_and_align - 1))); + + switch (size_and_align) { + case 16: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 16>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 15>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 14>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 13>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 12>(dst_shared, src_global); return; + case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 11>(dst_shared, src_global); return; + case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 10>(dst_shared, src_global); return; + case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 9>(dst_shared, src_global); return; + case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 8>(dst_shared, src_global); return; + case 9: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 7>(dst_shared, src_global); return; + case 10: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 6>(dst_shared, src_global); return; + case 11: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 5>(dst_shared, src_global); return; + case 12: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 4>(dst_shared, src_global); return; + case 13: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 3>(dst_shared, src_global); return; + case 14: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 2>(dst_shared, src_global); return; + case 15: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 1>(dst_shared, src_global); return; + case 16: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + case 8: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 8>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 7>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 6>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 5>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 4>(dst_shared, src_global); return; + case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 3>(dst_shared, src_global); return; + case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 2>(dst_shared, src_global); return; + case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 1>(dst_shared, src_global); return; + case 8: 
_CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + case 4: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 4>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 3>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 2>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 1>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + default: + _CUDA_PIPELINE_ABORT(); + return; + } +} + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_commit() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit(); +} + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_wait_prior(size_t prior) +{ + switch (prior) { + case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); return; + case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); return; + case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); return; + case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); return; + case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); return; + case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); return; + case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); return; + case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); return; + default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); return; + } +} + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# include "cuda_awbarrier_primitives.h" + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_arrive_on(__mbarrier_t* barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(barrier); +} +# endif + +#endif /* !_CUDA_PIPELINE_PRIMITIVES_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h new file mode 100644 index 0000000000000000000000000000000000000000..782bff6caaf2c5991f7f14d2de45863cdb3a3fad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h @@ -0,0 +1,2300 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
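/*
 * Illustrative usage sketch, not part of the vendored NVIDIA header above: a
 * kernel that stages one float4 per thread into shared memory with the
 * primitives declared in cuda_pipeline_primitives.h. The kernel, buffer names
 * and the 256-thread block size are hypothetical; the asynchronous path needs
 * hardware cp.async support (sm_80+), older devices fall back to synchronous
 * copies.
 */
#include <cuda_pipeline_primitives.h>

__global__ void stage_tile(const float4* __restrict__ gmem, float4* __restrict__ out)
{
    __shared__ float4 tile[256];                 // assumes blockDim.x == 256
    const unsigned t = threadIdx.x;
    const unsigned i = blockIdx.x * blockDim.x + t;

    // 16-byte copy from global to shared; size_and_align must be 4, 8 or 16 and
    // both pointers must be aligned to that size.
    __pipeline_memcpy_async(&tile[t], &gmem[i], sizeof(float4));
    __pipeline_commit();                         // close the current batch of async copies
    __pipeline_wait_prior(0);                    // wait for every committed batch to land
    __syncthreads();

    out[i] = tile[t];
}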
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_RUNTIME_H__) +#define __CUDA_RUNTIME_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__ +#endif + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#endif +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#elif defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable: 4820) +#endif +#endif + +#ifdef __QNX__ +#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) +typedef unsigned size_t; +#endif +#endif +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "crt/host_config.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "library_types.h" +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#include "cuda_runtime_api.h" +#include "driver_functions.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "crt/host_defines.h" +#ifdef __CUDACC_RTC__ +#include "target" +#endif /* defined(__CUDACC_RTC__) */ + + +#include "vector_functions.h" + +#if defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#include "nvrtc_device_runtime.h" +#include "crt/device_functions.h" +#include "crt/common_functions.h" +#include "device_launch_parameters.h" + +#else /* !__CUDACC_RTC__ */ +#define EXCLUDE_FROM_RTC +#include "crt/common_functions.h" +#include "crt/device_functions.h" +#include "device_launch_parameters.h" + +#if 
defined(__CUDACC_EXTENDED_LAMBDA__) +#include +#include +struct __device_builtin__ __nv_lambda_preheader_injection { }; +#endif /* defined(__CUDACC_EXTENDED_LAMBDA__) */ + +#undef EXCLUDE_FROM_RTC +#endif /* __CUDACC_RTC__ */ + +#endif /* __CUDACC__ */ + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +#if defined(__cplusplus) && !defined(__CUDACC_RTC__) + +#if __cplusplus >= 201103 +#include +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +/** + * \addtogroup CUDART_HIGHLEVEL + * @{ + */ + +/** + *\brief Launches a device function + * + * The function invokes kernel \p func on \p gridDim (\p gridDim.x × \p gridDim.y + * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x × + * \p blockDim.y × \p blockDim.z) threads. + * + * If the kernel has N parameters the \p args should point to array of N pointers. + * Each pointer, from args[0] to args[N - 1], point to the region + * of memory from which the actual parameter will be copied. + * + * \p sharedMem sets the amount of dynamic shared memory that will be available to + * each thread block. + * + * \p stream specifies a stream the invocation is associated to. + * + * \param func - Device function symbol + * \param gridDim - Grid dimentions + * \param blockDim - Block dimentions + * \param args - Arguments + * \param sharedMem - Shared memory (defaults to 0) + * \param stream - Stream identifier (defaults to NULL) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaLaunchKernel( + const T *func, + dim3 gridDim, + dim3 blockDim, + void **args, + size_t sharedMem = 0, + cudaStream_t stream = 0 +) +{ + return ::cudaLaunchKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream); +} + + +#if __cplusplus >= 201103 || defined(__DOXYGEN_ONLY__) +/** + * \brief Launches a CUDA function with launch-time configuration + * + * Invokes the kernel \p func on \p config->gridDim (\p config->gridDim.x + * × \p config->gridDim.y × \p config->gridDim.z) grid of blocks. + * Each block contains \p config->blockDim (\p config->blockDim.x × + * \p config->blockDim.y × \p config->blockDim.z) threads. + * + * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that + * will be available to each thread block. + * + * \p config->stream specifies a stream the invocation is associated to. 
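/*
 * Illustrative host-side sketch, not part of the vendored header: launching a
 * kernel through the cudaLaunchKernel C++ wrapper defined above. The axpy
 * kernel and its launch geometry are hypothetical.
 */
__global__ void axpy(float a, const float* x, float* y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

cudaError_t launch_axpy(float a, const float* x, float* y, int n, cudaStream_t stream)
{
    void* args[] = { &a, &x, &y, &n };                    // one pointer per kernel parameter
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    return cudaLaunchKernel(axpy, grid, block, args, 0 /* sharedMem */, stream);
}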
+ * + * Configuration beyond grid and block dimensions, dynamic shared memory size, + * and stream can be provided with the following two fields of \p config: + * + * \p config->attrs is an array of \p config->numAttrs contiguous + * ::cudaLaunchAttribute elements. The value of this pointer is not considered + * if \p config->numAttrs is zero. However, in that case, it is recommended to + * set the pointer to NULL. + * \p config->numAttrs is the number of attributes populating the first + * \p config->numAttrs positions of the \p config->attrs array. + * + * The kernel arguments should be passed as arguments to this function via the + * \p args parameter pack. + * + * The C API version of this function, \p cudaLaunchKernelExC, is also available + * for pre-C++11 compilers and for use cases where the ability to pass kernel + * parameters via void* array is preferable. + * + * \param config - Launch configuration + * \param func - Kernel to launch + * \param args - Parameter pack of kernel parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args) "cudaLaunchKernelEx (C API)", + * ::cuLaunchKernelEx + */ +template +static __inline__ __host__ cudaError_t cudaLaunchKernelEx( + const cudaLaunchConfig_t *config, + void (*kernel)(ExpTypes...), + ActTypes &&... args +) +{ + return [&](ExpTypes... coercedArgs){ + void *pArgs[] = { &coercedArgs... }; + return ::cudaLaunchKernelExC(config, (const void *)kernel, pArgs); + }(std::forward(args)...); +} +#endif + +/** + *\brief Launches a device function + * + * The function invokes kernel \p func on \p gridDim (\p gridDim.x × \p gridDim.y + * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x × + * \p blockDim.y × \p blockDim.z) threads. + * + * The device on which this kernel is invoked must have a non-zero value for + * the device attribute ::cudaDevAttrCooperativeLaunch. + * + * The total number of blocks launched cannot exceed the maximum number of blocks per + * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or + * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + * as specified by the device attribute ::cudaDevAttrMultiProcessorCount. + * + * The kernel cannot make use of CUDA dynamic parallelism. + * + * If the kernel has N parameters the \p args should point to array of N pointers. + * Each pointer, from args[0] to args[N - 1], point to the region + * of memory from which the actual parameter will be copied. + * + * \p sharedMem sets the amount of dynamic shared memory that will be available to + * each thread block. + * + * \p stream specifies a stream the invocation is associated to. 
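/*
 * Illustrative sketch, not part of the vendored header: using the variadic
 * cudaLaunchKernelEx wrapper above with a cudaLaunchConfig_t. The scale kernel
 * and geometry are hypothetical; this path needs C++11 and a runtime that
 * provides cudaLaunchConfig_t.
 */
__global__ void scale(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

cudaError_t launch_scale(float* data, float factor, int n, cudaStream_t stream)
{
    cudaLaunchConfig_t config = {};
    config.gridDim          = dim3((n + 255) / 256);
    config.blockDim         = dim3(256);
    config.dynamicSmemBytes = 0;
    config.stream           = stream;
    config.attrs            = nullptr;   // no extra launch attributes
    config.numAttrs         = 0;
    // Kernel arguments are passed directly; no void* array is required.
    return cudaLaunchKernelEx(&config, scale, data, factor, n);
}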
+ * + * \param func - Device function symbol + * \param gridDim - Grid dimentions + * \param blockDim - Block dimentions + * \param args - Arguments + * \param sharedMem - Shared memory (defaults to 0) + * \param stream - Stream identifier (defaults to NULL) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaLaunchCooperativeKernel( + const T *func, + dim3 gridDim, + dim3 blockDim, + void **args, + size_t sharedMem = 0, + cudaStream_t stream = 0 +) +{ + return ::cudaLaunchCooperativeKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream); +} + +/** + * \brief \hl Creates an event object with the specified flags + * + * Creates an event object with the specified flags. Valid flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that event should use blocking + * synchronization. A host thread that uses ::cudaEventSynchronize() to wait + * on an event created with this flag will block until the event actually + * completes. + * - ::cudaEventDisableTiming: Specifies that the created event does not need + * to record timing data. Events created with this flag specified and + * the ::cudaEventBlockingSync flag not specified will provide the best + * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery(). + * + * \param event - Newly created event + * \param flags - Flags for new event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent + */ +static __inline__ __host__ cudaError_t cudaEventCreate( + cudaEvent_t *event, + unsigned int flags +) +{ + return ::cudaEventCreateWithFlags(event, flags); +} + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * If there are any errors, diagnostic information may be returned in \p pErrorNode and + * \p pLogBuffer. This is the primary way to inspect instantiation errors. The output + * will be null terminated unless the diagnostics overflow + * the buffer. In this case, they will be truncated, and the last byte can be + * inspected to determine if truncation occurred. 
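/*
 * Illustrative sketch, not part of the vendored header: the two-argument
 * cudaEventCreate overload above forwards to cudaEventCreateWithFlags, so it
 * can create a lightweight event for cross-stream ordering. The stream names
 * are hypothetical.
 */
cudaError_t order_streams(cudaStream_t producer, cudaStream_t consumer)
{
    cudaEvent_t done;
    // No timing is needed, so cudaEventDisableTiming keeps the event cheap.
    cudaError_t err = cudaEventCreate(&done, cudaEventDisableTiming);
    if (err != cudaSuccess) return err;

    cudaEventRecord(done, producer);            // marks the end of work queued in `producer`
    cudaStreamWaitEvent(consumer, done, 0);     // `consumer` waits for that point
    return cudaEventDestroy(done);              // resources released once the event completes
}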
+ * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param pErrorNode - In case of an instantiation error, this may be modified to + * indicate a node contributing to the error + * \param pLogBuffer - A character buffer to store diagnostic messages + * \param bufferSize - Size of the log buffer in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +static __inline__ __host__ cudaError_t cudaGraphInstantiate( + cudaGraphExec_t *pGraphExec, + cudaGraph_t graph, + cudaGraphNode_t *pErrorNode, + char *pLogBuffer, + size_t bufferSize +) +{ + (void)pErrorNode; + (void)pLogBuffer; + (void)bufferSize; + return ::cudaGraphInstantiate(pGraphExec, graph, 0); +} + +/** + * \brief \hl Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it + * can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between host + * and device. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaHostAllocDefault: This flag's value is defined to be 0. + * - ::cudaHostAllocPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space. + * The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). + * WC memory can be transferred across the PCI Express bus more quickly on some + * system configurations, but cannot be read efficiently by most CPUs. WC + * memory is a good option for buffers that will be written by the CPU and read + * by the device via mapped pinned memory or host->device transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * ::cudaSetDeviceFlags() must have been called with the ::cudaDeviceMapHost + * flag in order for the ::cudaHostAllocMapped flag to have any effect. + * + * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices + * that do not support mapped pinned memory. The failure is deferred to + * ::cudaHostGetDevicePointer() because the memory may be mapped into other + * CUDA contexts via the ::cudaHostAllocPortable flag. + * + * Memory allocated by this function must be freed with ::cudaFreeHost(). 
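/*
 * Illustrative sketch, not part of the vendored header: capturing stream work
 * into a graph and instantiating it with the five-argument cudaGraphInstantiate
 * overload defined above (that wrapper ignores the error-node and log-buffer
 * arguments). Buffer names are hypothetical; `stream` must be a non-default
 * stream for capture.
 */
cudaError_t capture_and_replay(float* d_dst, const float* d_src, size_t bytes, cudaStream_t stream)
{
    cudaGraph_t graph;
    cudaGraphExec_t exec;

    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    cudaMemcpyAsync(d_dst, d_src, bytes, cudaMemcpyDeviceToDevice, stream);   // recorded, not executed
    cudaError_t err = cudaStreamEndCapture(stream, &graph);
    if (err != cudaSuccess) return err;

    err = cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0);
    cudaGraphDestroy(graph);                    // the executable graph is independent of the template
    if (err != cudaSuccess) return err;

    err = cudaGraphLaunch(exec, stream);        // replays the captured copy
    cudaStreamSynchronize(stream);
    cudaGraphExecDestroy(exec);
    return err;
}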
+ * + * \param ptr - Device pointer to allocated memory + * \param size - Requested allocation size in bytes + * \param flags - Requested properties of allocated memory + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc + */ +static __inline__ __host__ cudaError_t cudaMallocHost( + void **ptr, + size_t size, + unsigned int flags +) +{ + return ::cudaHostAlloc(ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaHostAlloc( + T **ptr, + size_t size, + unsigned int flags +) +{ + return ::cudaHostAlloc((void**)(void*)ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaHostGetDevicePointer( + T **pDevice, + void *pHost, + unsigned int flags +) +{ + return ::cudaHostGetDevicePointer((void**)(void*)pDevice, pHost, flags); +} + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p size bytes of managed memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::cudaErrorNotSupported is returned. Support + * for managed memory can be queried using the device attribute + * ::cudaDevAttrManagedMemory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p size + * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. The pointer + * is valid on the CPU and on all GPUs in the system that support managed memory. + * All accesses to this pointer must obey the Unified Memory programming model. + * + * \p flags specifies the default stream association for this allocation. + * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The + * default value for \p flags is ::cudaMemAttachGlobal. + * If ::cudaMemAttachGlobal is specified, then this memory is accessible from + * any stream on any device. If ::cudaMemAttachHost is specified, then the + * allocation should not be accessed from devices that have a zero value for the + * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to + * ::cudaStreamAttachMemAsync will be required to enable access on such devices. + * + * If the association is later changed via ::cudaStreamAttachMemAsync to + * a single stream, the default association, as specifed during ::cudaMallocManaged, + * is restored when that stream is destroyed. For __managed__ variables, the + * default association is always ::cudaMemAttachGlobal. Note that destroying a + * stream is an asynchronous operation, and as a result, the change to default + * association won't happen until all work in the stream has completed. + * + * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree. + * + * Device memory oversubscription is possible for GPUs that have a non-zero value for the + * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on + * such GPUs may be evicted from device memory to host memory at any time by the Unified + * Memory driver in order to make room for other allocations. + * + * In a multi-GPU system where all GPUs have a non-zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this + * API returns and instead may be populated on access. 
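/*
 * Illustrative sketch, not part of the vendored header: portable, mapped pinned
 * memory through the cudaHostAlloc and cudaHostGetDevicePointer templates
 * above. Names are hypothetical; as documented above, cudaSetDeviceFlags must
 * have been called with cudaDeviceMapHost for cudaHostAllocMapped to take
 * effect.
 */
cudaError_t make_mapped_buffer(int** h_ptr, int** d_ptr, size_t count)
{
    cudaError_t err = cudaHostAlloc(h_ptr, count * sizeof(int),
                                    cudaHostAllocMapped | cudaHostAllocPortable);
    if (err != cudaSuccess) return err;
    // Device-side alias of the same physical pages; release later with cudaFreeHost(*h_ptr).
    return cudaHostGetDevicePointer(d_ptr, *h_ptr, 0);
}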
In such systems, managed memory can + * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to + * maintain data locality and prevent excessive page faults to the extent possible. The application + * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application + * can also explicitly migrate memory to a desired processor's memory via + * ::cudaMemPrefetchAsync. + * + * In a multi-GPU system where all of the GPUs have a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support + * with each other, the physical storage for managed memory is created on the GPU which is active + * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced + * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate + * memory among such GPUs. + * + * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and + * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess + * is zero for at least one of those GPUs, the location chosen for physical storage of managed + * memory is system-dependent. + * - On Linux, the location chosen will be device memory as long as the current set of active + * contexts are on devices that either have peer-to-peer support with each other or have a + * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. + * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero + * value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all devices used in + * that process that support managed memory have to be peer-to-peer compatible + * with each other. The error ::cudaErrorInvalidDevice will be returned if a device + * that supports managed memory is used and it is not peer-to-peer compatible with + * any of the other managed memory supporting devices that were previously used in + * that process, even if ::cudaDeviceReset has been called on those devices. These + * environment variables are described in the CUDA programming guide under the + * "CUDA environment variables" section. + * - On ARM, managed memory is not available on discrete gpu with Drive PX-2. 
+ * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal) + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync + */ +template +static __inline__ __host__ cudaError_t cudaMallocManaged( + T **devPtr, + size_t size, + unsigned int flags = cudaMemAttachGlobal +) +{ + return ::cudaMallocManaged((void**)(void*)devPtr, size, flags); +} + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p stream to specify stream association of + * \p length bytes of memory starting from \p devPtr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p devPtr must point to an one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cudaMallocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::cudaDevAttrPageableMemoryAccess. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. + * + * For pageable allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle. + * The default value for \p flags is ::cudaMemAttachSingle + * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with + * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess, + * the program makes a guarantee that it will only access the memory on the device + * from \p stream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p stream + * have completed, regardless of whether other streams are active. In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. 
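/*
 * Illustrative sketch, not part of the vendored header: allocating managed
 * memory with the cudaMallocManaged template above and steering residency with
 * cudaMemPrefetchAsync, as suggested in the documentation. Device 0 and the
 * element count are hypothetical; prefetching is meaningful only on devices
 * with a non-zero cudaDevAttrConcurrentManagedAccess.
 */
cudaError_t make_managed(float** data, size_t count, cudaStream_t stream)
{
    cudaError_t err = cudaMallocManaged(data, count * sizeof(float));   // cudaMemAttachGlobal by default
    if (err != cudaSuccess) return err;

    for (size_t i = 0; i < count; ++i) (*data)[i] = 1.0f;               // first touch on the host

    // Migrate the pages to device 0 ahead of the kernels that will read them.
    return cudaMemPrefetchAsync(*data, count * sizeof(float), 0 /* device */, stream);
}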
+ * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p stream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cudaMallocManaged. For __managed__ variables, the default + * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. + * + * \param stream - Stream in which to enqueue the attach operation + * \param devPtr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * memory) + * \param length - Length of memory (defaults to zero) + * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle) + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged + */ +template +static __inline__ __host__ cudaError_t cudaStreamAttachMemAsync( + cudaStream_t stream, + T *devPtr, + size_t length = 0, + unsigned int flags = cudaMemAttachSingle +) +{ + return ::cudaStreamAttachMemAsync(stream, (void*)devPtr, length, flags); +} + +template +static __inline__ __host__ cudaError_t cudaMalloc( + T **devPtr, + size_t size +) +{ + return ::cudaMalloc((void**)(void*)devPtr, size); +} + +template +static __inline__ __host__ cudaError_t cudaMallocHost( + T **ptr, + size_t size, + unsigned int flags = 0 +) +{ + return cudaMallocHost((void**)(void*)ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaMallocPitch( + T **devPtr, + size_t *pitch, + size_t width, + size_t height +) +{ + return ::cudaMallocPitch((void**)(void*)devPtr, pitch, width, height); +} + +/** + * \brief Allocate from a pool + * + * This is an alternate spelling for cudaMallocFromPoolAsync + * made available through operator overloading. 
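/*
 * Illustrative sketch, not part of the vendored header: a pitched 2D allocation
 * through the cudaMallocPitch template above, filled with cudaMemcpy2D. The
 * image dimensions and host buffer are hypothetical.
 */
cudaError_t upload_image(const float* h_img, size_t width, size_t height)
{
    float* d_img = nullptr;
    size_t pitch = 0;    // returned row stride in bytes, >= width * sizeof(float)
    cudaError_t err = cudaMallocPitch(&d_img, &pitch, width * sizeof(float), height);
    if (err != cudaSuccess) return err;

    err = cudaMemcpy2D(d_img, pitch,                        // dst pointer and dst pitch
                       h_img, width * sizeof(float),        // src pointer and src pitch
                       width * sizeof(float), height,       // row width in bytes, number of rows
                       cudaMemcpyHostToDevice);
    cudaFree(d_img);
    return err;
}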
+ * + * \sa ::cudaMallocFromPoolAsync, + * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaStream_t hStream) "cudaMallocAsync (C API)" + */ +static __inline__ __host__ cudaError_t cudaMallocAsync( + void **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync(ptr, size, memPool, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocAsync( + T **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocAsync( + T **ptr, + size_t size, + cudaStream_t stream +) +{ + return ::cudaMallocAsync((void**)(void*)ptr, size, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocFromPoolAsync( + T **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream); +} + +#if defined(__CUDACC__) + +/** + * \brief \hl Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice. + * + * \param symbol - Device symbol reference + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyToSymbol( + const T &symbol, + const void *src, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyHostToDevice +) +{ + return ::cudaMemcpyToSymbol((const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief \hl Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice. + * + * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy + * may overlap with operations in other streams. 
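/*
 * Illustrative sketch, not part of the vendored header: stream-ordered scratch
 * memory with the cudaMallocAsync template above and cudaFreeAsync. The scratch
 * size is hypothetical; the device must support memory pools
 * (cudaDevAttrMemoryPoolsSupported != 0).
 */
cudaError_t with_scratch(cudaStream_t stream, size_t bytes)
{
    float* scratch = nullptr;
    cudaError_t err = cudaMallocAsync(&scratch, bytes, stream);   // allocation is ordered in `stream`
    if (err != cudaSuccess) return err;

    // ... enqueue kernels that use `scratch` on the same stream here ...

    return cudaFreeAsync(scratch, stream);    // freed once prior work in `stream` has completed
}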
+ * + * \param symbol - Device symbol reference + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyToSymbolAsync( + const T &symbol, + const void *src, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyHostToDevice, + cudaStream_t stream = 0 +) +{ + return ::cudaMemcpyToSymbolAsync((const void*)&symbol, src, count, offset, kind, stream); +} + +/** + * \brief \hl Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice. + * + * \param dst - Destination memory address + * \param symbol - Device symbol reference + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyFromSymbol( + void *dst, + const T &symbol, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost +) +{ + return ::cudaMemcpyFromSymbol(dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief \hl Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that resides in + * global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice. + * + * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap + * with operations in other streams. 
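/*
 * Illustrative sketch, not part of the vendored header: round-tripping a
 * __constant__ array through the cudaMemcpyToSymbol / cudaMemcpyFromSymbol
 * templates above. The symbol name is hypothetical.
 */
__constant__ float kCoefficients[16];

cudaError_t set_coefficients(const float (&host)[16])
{
    // The symbol is passed by reference; the wrapper takes its address internally.
    return cudaMemcpyToSymbol(kCoefficients, host, sizeof(host));
}

cudaError_t get_coefficients(float (&host)[16])
{
    return cudaMemcpyFromSymbol(host, kCoefficients, sizeof(host));
}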
+ * + * \param dst - Destination memory address + * \param symbol - Device symbol reference + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyFromSymbolAsync( + void *dst, + const T &symbol, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost, + cudaStream_t stream = 0 +) +{ + return ::cudaMemcpyFromSymbolAsync(dst, (const void*)&symbol, count, offset, kind, stream); +} + +/** + * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +template +static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeToSymbol( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphAddMemcpyNodeToSymbol(pGraphNode, graph, pDependencies, numDependencies, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
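/*
 * Illustrative sketch, not part of the vendored header: a one-node graph built
 * with the cudaGraphAddMemcpyNodeToSymbol template above, then instantiated and
 * launched. The __constant__ symbol and host pointer are hypothetical.
 */
__constant__ int kTableSize;

cudaError_t replay_symbol_update(const int* h_value, cudaStream_t stream)
{
    cudaGraph_t graph;
    cudaGraphNode_t copyNode;
    cudaGraphExec_t exec;

    cudaError_t err = cudaGraphCreate(&graph, 0);
    if (err != cudaSuccess) return err;

    // Root node (no dependencies): copy sizeof(int) bytes into the symbol.
    err = cudaGraphAddMemcpyNodeToSymbol(&copyNode, graph, nullptr, 0,
                                         kTableSize, h_value, sizeof(int), 0,
                                         cudaMemcpyHostToDevice);
    if (err == cudaSuccess)
        err = cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0);
    cudaGraphDestroy(graph);                   // the executable graph keeps its own copy
    if (err != cudaSuccess) return err;

    err = cudaGraphLaunch(exec, stream);
    cudaStreamSynchronize(stream);
    cudaGraphExecDestroy(exec);
    return err;
}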
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +template +static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeFromSymbol( + cudaGraphNode_t* pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t* pDependencies, + size_t numDependencies, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphAddMemcpyNodeFromSymbol(pGraphNode, graph, pDependencies, numDependencies, dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief Sets a memcpy node's parameters to copy to a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsToSymbol( + cudaGraphNode_t node, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphMemcpyNodeSetParamsToSymbol(node, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Sets a memcpy node's parameters to copy from a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. 
+ * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsFromSymbol( + cudaGraphNode_t node, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphMemcpyNodeSetParamsFromSymbol(node, dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p src and \p symbol must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphInstantiate, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsToSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphExecMemcpyNodeSetParamsToSymbol(hGraphExec, node, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p symbol and \p dst must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
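/*
 * Illustrative sketch, not part of the vendored header: re-pointing the source
 * of an instantiated symbol-copy node with the
 * cudaGraphExecMemcpyNodeSetParamsToSymbol template above, so the next launch
 * copies from a different host buffer without re-instantiating. The symbol,
 * node and exec handles are hypothetical and assumed to come from the graph
 * construction code; the new operand must keep the original's size and context.
 */
__constant__ float kGain;

cudaError_t retarget_copy(cudaGraphExec_t exec, cudaGraphNode_t copyNode, const float* h_newSrc)
{
    // Only the operands change; topology and operand shapes must stay the same.
    return cudaGraphExecMemcpyNodeSetParamsToSymbol(exec, copyNode, kGain, h_newSrc,
                                                    sizeof(float), 0,
                                                    cudaMemcpyHostToDevice);
}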
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphInstantiate, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsFromSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphExecMemcpyNodeSetParamsFromSymbol(hGraphExec, node, dst, (const void*)&symbol, count, offset, kind); +} + +// convenience function to avoid source breakage in c++ code +static __inline__ __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out) +{ + cudaGraphExecUpdateResultInfo resultInfo; + cudaError_t status = cudaGraphExecUpdate(hGraphExec, hGraph, &resultInfo); + if (hErrorNode_out) { + *hErrorNode_out = resultInfo.errorNode; + } + if (updateResult_out) { + *updateResult_out = resultInfo.result; + } + return status; +} + +#if __cplusplus >= 201103 + +/** + * \brief Creates a user object by wrapping a C++ object + * + * TODO detail + * + * \param object_out - Location to return the user object handle + * \param objectToWrap - This becomes the \ptr argument to ::cudaUserObjectCreate. A + * lambda will be passed for the \p destroy argument, which calls + * delete on this object pointer. + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. + * \param flags - Currently it is required to pass cudaUserObjectNoDestructorSync, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + */ +template +static __inline__ __host__ cudaError_t cudaUserObjectCreate( + cudaUserObject_t *object_out, + T *objectToWrap, + unsigned int initialRefcount, + unsigned int flags) +{ + return ::cudaUserObjectCreate( + object_out, + objectToWrap, + [](void *vpObj) { delete reinterpret_cast(vpObj); }, + initialRefcount, + flags); +} + +template +static __inline__ __host__ cudaError_t cudaUserObjectCreate( + cudaUserObject_t *object_out, + T *objectToWrap, + unsigned int initialRefcount, + cudaUserObjectFlags flags) +{ + return cudaUserObjectCreate(object_out, objectToWrap, initialRefcount, (unsigned int)flags); +} + +#endif + +/** + * \brief \hl Finds the address associated with a CUDA symbol + * + * Returns in \p *devPtr the address of symbol \p symbol on the device. 
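/*
 * Illustrative sketch, not part of the vendored header: wrapping a C++ object
 * with the cudaUserObjectCreate template above and handing its reference to a
 * graph, so the object is deleted when the graph releases it. ScratchState is
 * hypothetical.
 */
#include <vector>

struct ScratchState { std::vector<float> history; };

cudaError_t attach_state(cudaGraph_t graph)
{
    cudaUserObject_t obj;
    // The wrapper installs a destroy callback that deletes the wrapped pointer.
    cudaError_t err = cudaUserObjectCreate(&obj, new ScratchState(), 1,
                                           cudaUserObjectNoDestructorSync);
    if (err != cudaSuccess) return err;
    // Move our reference into the graph; it is released when the graph is destroyed.
    return cudaGraphRetainUserObject(graph, obj, 1, cudaGraphUserObjectMove);
}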
+ * \p symbol can either be a variable that resides in global or constant memory space. + * If \p symbol cannot be found, or if \p symbol is not declared + * in the global or constant memory space, \p *devPtr is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param devPtr - Return device pointer associated with symbol + * \param symbol - Device symbol reference + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)", + * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetSymbolAddress( + void **devPtr, + const T &symbol +) +{ + return ::cudaGetSymbolAddress(devPtr, (const void*)&symbol); +} + +/** + * \brief \hl Finds the size of the object associated with a CUDA symbol + * + * Returns in \p *size the size of symbol \p symbol. \p symbol must be a + * variable that resides in global or constant memory space. + * If \p symbol cannot be found, or if \p symbol is not declared + * in global or constant memory space, \p *size is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param size - Size of object associated with symbol + * \param symbol - Device symbol reference + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)", + * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetSymbolSize( + size_t *size, + const T &symbol +) +{ + return ::cudaGetSymbolSize(size, (const void*)&symbol); +} + +/** + * \brief \hl Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache configuration + * for the function specified via \p func. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute \p func. + * + * \p func must be a pointer to a function that executes on the device. + * The parameter specified by \p func must be declared as a \p __global__ + * function. If the specified function does not exist, + * then ::cudaErrorInvalidDeviceFunction is returned. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
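+ *
+ * As a brief illustration (the kernel \p myKernel below is an assumed name used
+ * only for this sketch):
+ * \code
+ * __global__ void myKernel(float *data);
+ *
+ * // Request a larger L1 cache for this kernel; the runtime may still
+ * // choose a different configuration if needed to run the kernel.
+ * cudaError_t err = cudaFuncSetCacheConfig(myKernel, cudaFuncCachePreferL1);
+ * \endcode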
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param func - device function pointer + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost, + * ::cudaThreadGetCacheConfig, + * ::cudaThreadSetCacheConfig + */ +template +static __inline__ __host__ cudaError_t cudaFuncSetCacheConfig( + T *func, + enum cudaFuncCache cacheConfig +) +{ + return ::cudaFuncSetCacheConfig((const void*)func, cacheConfig); +} + +template +static __inline__ __host__ cudaError_t cudaFuncSetSharedMemConfig( + T *func, + enum cudaSharedMemConfig config +) +{ + return ::cudaFuncSetSharedMemConfig((const void*)func, config); +} + +#endif // __CUDACC__ + +/** + * \brief Returns occupancy for a device function + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calulated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor( + int *numBlocks, + T func, + int blockSize, + size_t dynamicSMemSize) +{ + return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, cudaOccupancyDefault); +} + +/** + * \brief Returns occupancy for a device function with the specified flags + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * The \p flags parameter controls how special cases are handled. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * + * - ::cudaOccupancyDisableCachingOverride: suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. 
+ * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calulated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + int *numBlocks, + T func, + int blockSize, + size_t dynamicSMemSize, + unsigned int flags) +{ + return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, flags); +} + +/** + * Helper functor for cudaOccupancyMaxPotentialBlockSize + */ +class __cudaOccupancyB2DHelper { + size_t n; +public: + inline __host__ CUDART_DEVICE __cudaOccupancyB2DHelper(size_t n_) : n(n_) {} + inline __host__ CUDART_DEVICE size_t operator()(int) + { + return n; + } +}; + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * The \p flags parameter controls how special cases are handled. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * + * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. + * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. 
+ * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ + +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags( + int *minGridSize, + int *blockSize, + T func, + UnaryFunction blockSizeToDynamicSMemSize, + int blockSizeLimit = 0, + unsigned int flags = 0) +{ + cudaError_t status; + + // Device and function properties + int device; + struct cudaFuncAttributes attr; + + // Limits + int maxThreadsPerMultiProcessor; + int warpSize; + int devMaxThreadsPerBlock; + int multiProcessorCount; + int funcMaxThreadsPerBlock; + int occupancyLimit; + int granularity; + + // Recorded maximum + int maxBlockSize = 0; + int numBlocks = 0; + int maxOccupancy = 0; + + // Temporary + int blockSizeToTryAligned; + int blockSizeToTry; + int blockSizeLimitAligned; + int occupancyInBlocks; + int occupancyInThreads; + size_t dynamicSMemSize; + + /////////////////////////// + // Check user input + /////////////////////////// + + if (!minGridSize || !blockSize || !func) { + return cudaErrorInvalidValue; + } + + ////////////////////////////////////////////// + // Obtain device and function properties + ////////////////////////////////////////////// + + status = ::cudaGetDevice(&device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &maxThreadsPerMultiProcessor, + cudaDevAttrMaxThreadsPerMultiProcessor, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &warpSize, + cudaDevAttrWarpSize, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &devMaxThreadsPerBlock, + cudaDevAttrMaxThreadsPerBlock, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &multiProcessorCount, + cudaDevAttrMultiProcessorCount, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaFuncGetAttributes(&attr, func); + if (status != cudaSuccess) { + return status; + } + + funcMaxThreadsPerBlock = attr.maxThreadsPerBlock; + + ///////////////////////////////////////////////////////////////////////////////// + // Try each block size, and pick the block size with maximum occupancy + ///////////////////////////////////////////////////////////////////////////////// + + occupancyLimit = maxThreadsPerMultiProcessor; + granularity = warpSize; + + if (blockSizeLimit == 0) { + blockSizeLimit = devMaxThreadsPerBlock; + } + + if (devMaxThreadsPerBlock < blockSizeLimit) { + blockSizeLimit = devMaxThreadsPerBlock; + } + + if (funcMaxThreadsPerBlock < blockSizeLimit) { + blockSizeLimit = funcMaxThreadsPerBlock; + } + + blockSizeLimitAligned = ((blockSizeLimit + (granularity - 1)) / granularity) * granularity; + + for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) { + // This is needed for the first iteration, because + // blockSizeLimitAligned could be greater than 
blockSizeLimit + // + if (blockSizeLimit < blockSizeToTryAligned) { + blockSizeToTry = blockSizeLimit; + } else { + blockSizeToTry = blockSizeToTryAligned; + } + + dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry); + + status = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + &occupancyInBlocks, + func, + blockSizeToTry, + dynamicSMemSize, + flags); + + if (status != cudaSuccess) { + return status; + } + + occupancyInThreads = blockSizeToTry * occupancyInBlocks; + + if (occupancyInThreads > maxOccupancy) { + maxBlockSize = blockSizeToTry; + numBlocks = occupancyInBlocks; + maxOccupancy = occupancyInThreads; + } + + // Early out if we have reached the maximum + // + if (occupancyLimit == maxOccupancy) { + break; + } + } + + /////////////////////////// + // Return best available + /////////////////////////// + + // Suggested min grid size to achieve a full machine launch + // + *minGridSize = numBlocks * multiProcessorCount; + *blockSize = maxBlockSize; + + return status; +} + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ + +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem( + int *minGridSize, + int *blockSize, + T func, + UnaryFunction blockSizeToDynamicSMemSize, + int blockSizeLimit = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, blockSizeLimit, cudaOccupancyDefault); +} + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the + * amount of per-block dynamic shared memory changes with different + * block sizes. 
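+ *
+ * A typical usage sketch (the kernel \p myKernel and problem size \p n are
+ * assumptions for illustration, not part of this API):
+ * \code
+ * __global__ void myKernel(float *data, int n);
+ *
+ * void launchWithMaxOccupancy(float *data, int n)
+ * {
+ *     int minGridSize = 0;   // minimum grid size for full device utilization
+ *     int blockSize   = 0;   // block size suggested by the occupancy calculator
+ *
+ *     cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, myKernel, 0, 0);
+ *
+ *     // Round the grid up so every element is covered.
+ *     int gridSize = (n + blockSize - 1) / blockSize;
+ *     myKernel<<<gridSize, blockSize>>>(data, n);
+ * }
+ * \endcode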
+ * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSize( + int *minGridSize, + int *blockSize, + T func, + size_t dynamicSMemSize = 0, + int blockSizeLimit = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, cudaOccupancyDefault); +} + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM. + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the block + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyAvailableDynamicSMemPerBlock( + size_t *dynamicSmemSize, + T func, + int numBlocks, + int blockSize) +{ + return ::cudaOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, (const void*)func, numBlocks, blockSize); +} + +/** + * \brief Returns grid and block size that achived maximum potential occupancy for a device function with the specified flags + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * The \p flags parameter controls how special cases are handle. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxPotentialBlockSize + * + * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. 
+ * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the + * amount of per-block dynamic shared memory changes with different + * block sizes. + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags( + int *minGridSize, + int *blockSize, + T func, + size_t dynamicSMemSize = 0, + int blockSizeLimit = 0, + unsigned int flags = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, flags); +} + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum cluster size in \p *clusterSize. + * + * The cluster dimensions in \p config are ignored. If func has a required + * cluster size set (see ::cudaFuncGetAttributes),\p *clusterSize will reflect + * the required cluster size. + * + * By default this function will always return a value that's portable on + * future hardware. A higher value may be returned if the kernel function + * allows non-portable cluster sizes. + * + * This function will respect the compile time launch bounds. + * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxPotentialClusterSize( + int *clusterSize, + T *func, + const cudaLaunchConfig_t *config) +{ + return ::cudaOccupancyMaxPotentialClusterSize(clusterSize, (const void*)func, config); +} + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. + * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes), the cluster size from config must either be + * unspecified or match the required size. 
+ * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidClusterSize, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveClusters( + int *numClusters, + T *func, + const cudaLaunchConfig_t *config) +{ + return ::cudaOccupancyMaxActiveClusters(numClusters, (const void*)func, config); +} + +#if defined __CUDACC__ + +/** + * \brief \hl Find out attributes for a given function + * + * This function obtains the attributes of a function specified via \p entry. + * The parameter \p entry must be a pointer to a function that executes + * on the device. The parameter specified by \p entry must be declared as a \p __global__ + * function. The fetched attributes are placed in \p attr. If the specified + * function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * + * Note that some function attributes such as + * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock" + * may vary based on the device that is currently being used. + * + * \param attr - Return pointer to function's attributes + * \param entry - Function to get attributes of + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost + */ +template +static __inline__ __host__ cudaError_t cudaFuncGetAttributes( + struct cudaFuncAttributes *attr, + T *entry +) +{ + return ::cudaFuncGetAttributes(attr, (const void*)entry); +} + +/** + * \brief \hl Set attributes for a given function + * + * This function sets the attributes of a function specified via \p entry. + * The parameter \p entry must be a pointer to a function that executes + * on the device. The parameter specified by \p entry must be declared as a \p __global__ + * function. The enumeration defined by \p attr is set to the value defined by \p value. + * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * If the specified attribute cannot be written, or if the value is incorrect, + * then ::cudaErrorInvalidValue is returned. + * + * Valid values for \p attr are: + * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. 
The sum of this value and the function attribute ::sharedSizeBytes + * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture. + * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * - ::cudaFuncAttributeRequiredClusterWidth: The required cluster width in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeRequiredClusterHeight: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeRequiredClusterDepth: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeClusterSchedulingPolicyPreference: The block + * scheduling policy of a function. The value type is cudaClusterSchedulingPolicy. + * + * \param entry - Function to get attributes of + * \param attr - Attribute to set + * \param value - Value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost + */ +template +static __inline__ __host__ cudaError_t cudaFuncSetAttribute( + T *entry, + enum cudaFuncAttribute attr, + int value +) +{ + return ::cudaFuncSetAttribute((const void*)entry, attr, value); +} + +/** + * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr + * + * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr. 
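+ *
+ * For example (the kernel \p myKernel is an assumed name, and error checking is
+ * omitted in this sketch):
+ * \code
+ * __global__ void myKernel(float *data);
+ *
+ * cudaKernel_t kernelHandle;
+ * // Look up the kernel handle for the entry function, mirroring the C-style usage.
+ * cudaError_t err = cudaGetKernel(&kernelHandle, (const void *)myKernel);
+ * \endcode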
+ * + * \param kernelPtr - Returns the device kernel + * \param entryFuncAddr - Address of device entry function to search kernel for + * + * \return + * ::cudaSuccess + * + * \sa + * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr) "cudaGetKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetKernel( + cudaKernel_t *kernelPtr, + const T *entryFuncAddr +) +{ + return ::cudaGetKernel(kernelPtr, (const void *)entryFuncAddr); +} + +#endif /* __CUDACC__ */ + +/** @} */ /* END CUDART_HIGHLEVEL */ + +#endif /* __cplusplus && !__CUDACC_RTC__ */ + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +#elif defined(_MSC_VER) +#pragma warning(pop) +#endif +#endif + +#undef __CUDA_DEPRECATED + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__ +#endif + +#endif /* !__CUDA_RUNTIME_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1aabe6987f23b4fd31b891fcfddc43e3aa6980d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h @@ -0,0 +1,13140 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + + +#if !defined(__CUDA_RUNTIME_API_H__) +#define __CUDA_RUNTIME_API_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_API_H__ +#endif + +/** + * \latexonly + * \page sync_async API synchronization behavior + * + * \section memcpy_sync_async_behavior Memcpy + * The API provides memcpy/memset functions in both synchronous and asynchronous forms, + * the latter having an \e "Async" suffix. This is a misnomer as each function + * may exhibit synchronous or asynchronous behavior depending on the arguments + * passed to the function. In the reference documentation, each memcpy function is + * categorized as \e synchronous or \e asynchronous, corresponding to the definitions + * below. + * + * \subsection MemcpySynchronousBehavior Synchronous + * + *
+ * 1. For transfers from pageable host memory to device memory, a stream sync is performed
+ *    before the copy is initiated. The function will return once the pageable
+ *    buffer has been copied to the staging memory for DMA transfer to device memory,
+ *    but the DMA to final destination may not have completed.
+ *
+ * 2. For transfers from pinned host memory to device memory, the function is synchronous
+ *    with respect to the host.
+ *
+ * 3. For transfers from device to either pageable or pinned host memory, the function returns
+ *    only once the copy has completed.
+ *
+ * 4. For transfers from device memory to device memory, no host-side synchronization is
+ *    performed.
+ *
+ * 5. For transfers from any host memory to any host memory, the function is fully
+ *    synchronous with respect to the host.
+ *
+ * \subsection MemcpyAsynchronousBehavior Asynchronous
+ *
+ * 1. For transfers from device memory to pageable host memory, the function
+ *    will return only once the copy has completed.
+ *
+ * 2. For transfers from any host memory to any host memory, the function is fully
+ *    synchronous with respect to the host.
+ *
+ * 3. If pageable memory must first be staged to pinned memory, the driver may
+ *    synchronize with the stream and stage the copy into pinned memory.
+ *
+ * 4. For all other transfers, the function should be fully asynchronous.
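+ *
+ * As a rough illustration of the distinction (the buffer names and the 1 MB size
+ * below are assumptions for the sketch, not part of the API):
+ * \code
+ * float *hPinned, *hPageable, *dBuf;
+ * size_t bytes = 1 << 20;
+ * cudaStream_t stream;
+ *
+ * hPageable = (float *)malloc(bytes);
+ * cudaMallocHost(&hPinned, bytes);      // pinned host memory
+ * cudaMalloc(&dBuf, bytes);
+ * cudaStreamCreate(&stream);
+ *
+ * // Pinned source: the copy can overlap with host work on the stream.
+ * cudaMemcpyAsync(dBuf, hPinned, bytes, cudaMemcpyHostToDevice, stream);
+ *
+ * // Pageable source: the driver may first stage through pinned memory,
+ * // synchronizing with the stream as described above.
+ * cudaMemcpyAsync(dBuf, hPageable, bytes, cudaMemcpyHostToDevice, stream);
+ *
+ * cudaStreamSynchronize(stream);
+ * \endcode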
+ * + * \section memset_sync_async_behavior Memset + * The cudaMemset functions are asynchronous with respect to the host + * except when the target memory is pinned host memory. The \e Async + * versions are always asynchronous with respect to the host. + * + * \section kernel_launch_details Kernel Launches + * Kernel launches are asynchronous with respect to the host. Details of + * concurrent kernel execution and data transfers can be found in the CUDA + * Programmers Guide. + * + * \endlatexonly + */ + +/** + * There are two levels for the runtime API. + * + * The C API (cuda_runtime_api.h) is + * a C-style interface that does not require compiling with \p nvcc. + * + * The \ref CUDART_HIGHLEVEL "C++ API" (cuda_runtime.h) is a + * C++-style interface built on top of the C API. It wraps some of the + * C API routines, using overloading, references and default arguments. + * These wrappers can be used from C++ code and can be compiled with any C++ + * compiler. The C++ API also has some CUDA-specific wrappers that wrap + * C API routines that deal with symbols, textures, and device functions. + * These wrappers require the use of \p nvcc because they depend on code being + * generated by the compiler. For example, the execution configuration syntax + * to invoke kernels is only available in source code compiled with \p nvcc. + */ + +/** CUDA Runtime API Version */ +#define CUDART_VERSION 12010 + +#if defined(__CUDA_API_VER_MAJOR__) && defined(__CUDA_API_VER_MINOR__) +# define __CUDART_API_VERSION ((__CUDA_API_VER_MAJOR__ * 1000) + (__CUDA_API_VER_MINOR__ * 10)) +#else +# define __CUDART_API_VERSION CUDART_VERSION +#endif + +#ifndef __DOXYGEN_ONLY__ +#include "crt/host_defines.h" +#endif +#include "builtin_types.h" + +#include "cuda_device_runtime_api.h" + +#if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) || defined(__CUDA_API_VERSION_INTERNAL) + #define __CUDART_API_PER_THREAD_DEFAULT_STREAM + #define __CUDART_API_PTDS(api) api ## _ptds + #define __CUDART_API_PTSZ(api) api ## _ptsz +#else + #define __CUDART_API_PTDS(api) api + #define __CUDART_API_PTSZ(api) api +#endif + +#define cudaSignalExternalSemaphoresAsync __CUDART_API_PTSZ(cudaSignalExternalSemaphoresAsync_v2) +#define cudaWaitExternalSemaphoresAsync __CUDART_API_PTSZ(cudaWaitExternalSemaphoresAsync_v2) + + #define cudaStreamGetCaptureInfo __CUDART_API_PTSZ(cudaStreamGetCaptureInfo_v2) + +#define cudaGetDeviceProperties cudaGetDeviceProperties_v2 + +#if defined(__CUDART_API_PER_THREAD_DEFAULT_STREAM) + #define cudaMemcpy __CUDART_API_PTDS(cudaMemcpy) + #define cudaMemcpyToSymbol __CUDART_API_PTDS(cudaMemcpyToSymbol) + #define cudaMemcpyFromSymbol __CUDART_API_PTDS(cudaMemcpyFromSymbol) + #define cudaMemcpy2D __CUDART_API_PTDS(cudaMemcpy2D) + #define cudaMemcpyToArray __CUDART_API_PTDS(cudaMemcpyToArray) + #define cudaMemcpy2DToArray __CUDART_API_PTDS(cudaMemcpy2DToArray) + #define cudaMemcpyFromArray __CUDART_API_PTDS(cudaMemcpyFromArray) + #define cudaMemcpy2DFromArray __CUDART_API_PTDS(cudaMemcpy2DFromArray) + #define cudaMemcpyArrayToArray __CUDART_API_PTDS(cudaMemcpyArrayToArray) + #define cudaMemcpy2DArrayToArray __CUDART_API_PTDS(cudaMemcpy2DArrayToArray) + #define cudaMemcpy3D __CUDART_API_PTDS(cudaMemcpy3D) + #define cudaMemcpy3DPeer __CUDART_API_PTDS(cudaMemcpy3DPeer) + #define cudaMemset __CUDART_API_PTDS(cudaMemset) + #define cudaMemset2D __CUDART_API_PTDS(cudaMemset2D) + #define cudaMemset3D __CUDART_API_PTDS(cudaMemset3D) + #define cudaGraphInstantiateWithParams __CUDART_API_PTSZ(cudaGraphInstantiateWithParams) + 
#define cudaGraphUpload __CUDART_API_PTSZ(cudaGraphUpload) + #define cudaGraphLaunch __CUDART_API_PTSZ(cudaGraphLaunch) + #define cudaStreamBeginCapture __CUDART_API_PTSZ(cudaStreamBeginCapture) + #define cudaStreamEndCapture __CUDART_API_PTSZ(cudaStreamEndCapture) + #define cudaStreamIsCapturing __CUDART_API_PTSZ(cudaStreamIsCapturing) + #define cudaMemcpyAsync __CUDART_API_PTSZ(cudaMemcpyAsync) + #define cudaMemcpyToSymbolAsync __CUDART_API_PTSZ(cudaMemcpyToSymbolAsync) + #define cudaMemcpyFromSymbolAsync __CUDART_API_PTSZ(cudaMemcpyFromSymbolAsync) + #define cudaMemcpy2DAsync __CUDART_API_PTSZ(cudaMemcpy2DAsync) + #define cudaMemcpyToArrayAsync __CUDART_API_PTSZ(cudaMemcpyToArrayAsync) + #define cudaMemcpy2DToArrayAsync __CUDART_API_PTSZ(cudaMemcpy2DToArrayAsync) + #define cudaMemcpyFromArrayAsync __CUDART_API_PTSZ(cudaMemcpyFromArrayAsync) + #define cudaMemcpy2DFromArrayAsync __CUDART_API_PTSZ(cudaMemcpy2DFromArrayAsync) + #define cudaMemcpy3DAsync __CUDART_API_PTSZ(cudaMemcpy3DAsync) + #define cudaMemcpy3DPeerAsync __CUDART_API_PTSZ(cudaMemcpy3DPeerAsync) + #define cudaMemsetAsync __CUDART_API_PTSZ(cudaMemsetAsync) + #define cudaMemset2DAsync __CUDART_API_PTSZ(cudaMemset2DAsync) + #define cudaMemset3DAsync __CUDART_API_PTSZ(cudaMemset3DAsync) + #define cudaStreamQuery __CUDART_API_PTSZ(cudaStreamQuery) + #define cudaStreamGetFlags __CUDART_API_PTSZ(cudaStreamGetFlags) + #define cudaStreamGetId __CUDART_API_PTSZ(cudaStreamGetId) + #define cudaStreamGetPriority __CUDART_API_PTSZ(cudaStreamGetPriority) + #define cudaEventRecord __CUDART_API_PTSZ(cudaEventRecord) + #define cudaEventRecordWithFlags __CUDART_API_PTSZ(cudaEventRecordWithFlags) + #define cudaStreamWaitEvent __CUDART_API_PTSZ(cudaStreamWaitEvent) + #define cudaStreamAddCallback __CUDART_API_PTSZ(cudaStreamAddCallback) + #define cudaStreamAttachMemAsync __CUDART_API_PTSZ(cudaStreamAttachMemAsync) + #define cudaStreamSynchronize __CUDART_API_PTSZ(cudaStreamSynchronize) + #define cudaLaunchKernel __CUDART_API_PTSZ(cudaLaunchKernel) + #define cudaLaunchKernelExC __CUDART_API_PTSZ(cudaLaunchKernelExC) + #define cudaLaunchHostFunc __CUDART_API_PTSZ(cudaLaunchHostFunc) + #define cudaMemPrefetchAsync __CUDART_API_PTSZ(cudaMemPrefetchAsync) + #define cudaLaunchCooperativeKernel __CUDART_API_PTSZ(cudaLaunchCooperativeKernel) + #define cudaStreamCopyAttributes __CUDART_API_PTSZ(cudaStreamCopyAttributes) + #define cudaStreamGetAttribute __CUDART_API_PTSZ(cudaStreamGetAttribute) + #define cudaStreamSetAttribute __CUDART_API_PTSZ(cudaStreamSetAttribute) + #define cudaMallocAsync __CUDART_API_PTSZ(cudaMallocAsync) + #define cudaFreeAsync __CUDART_API_PTSZ(cudaFreeAsync) + #define cudaMallocFromPoolAsync __CUDART_API_PTSZ(cudaMallocFromPoolAsync) + #define cudaGetDriverEntryPoint __CUDART_API_PTSZ(cudaGetDriverEntryPoint) +#endif + +/** \cond impl_private */ +#if !defined(__dv) + +#if defined(__cplusplus) + +#define __dv(v) \ + = v + +#else /* __cplusplus */ + +#define __dv(v) + +#endif /* __cplusplus */ + +#endif /* !__dv */ +/** \endcond impl_private */ + +#if (defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350)) /** Visible to SM>=3.5 and "__host__ __device__" only **/ + +#define CUDART_DEVICE __device__ + +#else + +#define CUDART_DEVICE + +#endif /** CUDART_DEVICE */ + +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) 
+#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** + * \defgroup CUDART_DEVICE Device Management + * + * ___MANBRIEF___ device management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the device management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Destroy all allocations and reset all state on the current device + * in the current process. + * + * Explicitly destroys and cleans up all resources associated with the current + * device in the current process. It is the caller's responsibility to ensure + * that the resources are not accessed or passed in subsequent API calls and + * doing so will result in undefined behavior. These resources include CUDA types + * such as ::cudaStream_t, ::cudaEvent_t, ::cudaArray_t, ::cudaMipmappedArray_t, + * ::cudaTextureObject_t, ::cudaSurfaceObject_t, ::textureReference, ::surfaceReference, + * ::cudaExternalMemory_t, ::cudaExternalSemaphore_t and ::cudaGraphicsResource_t. + * Any subsequent API call to this device will reinitialize the device. + * + * Note that this function will reset the device immediately. It is the caller's + * responsibility to ensure that the device is not being accessed by any + * other host threads from the process when this function is called. + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSynchronize + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceReset(void); + +/** + * \brief Wait for compute device to finish + * + * Blocks until the device has completed all preceding requested tasks. + * ::cudaDeviceSynchronize() returns an error if one of the preceding tasks + * has failed. If the ::cudaDeviceScheduleBlockingSync flag was set for + * this device, the host thread will block until the device has finished + * its work. + * + * \return + * ::cudaSuccess + * \note_device_sync_deprecated + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceReset, + * ::cuCtxSynchronize + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceSynchronize(void); + +/** + * \brief Set resource limits + * + * Setting \p limit to \p value is a request by the application to update + * the current limit maintained by the device. The driver is free to + * modify the requested value to meet h/w requirements (this could be + * clamping to minimum or maximum values, rounding up to nearest element + * size, etc). The application can use ::cudaDeviceGetLimit() to find out + * exactly what the limit has been set to. + * + * Setting each ::cudaLimit has its own specific restrictions, so each is + * discussed here. + * + * - ::cudaLimitStackSize controls the stack size in bytes of each GPU thread. + * + * - ::cudaLimitPrintfFifoSize controls the size in bytes of the shared FIFO + * used by the ::printf() device system call. Setting + * ::cudaLimitPrintfFifoSize must not be performed after launching any kernel + * that uses the ::printf() device system call - in such case + * ::cudaErrorInvalidValue will be returned. + * + * - ::cudaLimitMallocHeapSize controls the size in bytes of the heap used by + * the ::malloc() and ::free() device system calls. 
Setting + * ::cudaLimitMallocHeapSize must not be performed after launching any kernel + * that uses the ::malloc() or ::free() device system calls - in such case + * ::cudaErrorInvalidValue will be returned. + * + * - ::cudaLimitDevRuntimeSyncDepth controls the maximum nesting depth of a + * grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting + * this limit must be performed before any launch of a kernel that uses the + * device runtime and calls ::cudaDeviceSynchronize() above the default sync + * depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail + * with error code ::cudaErrorSyncDepthExceeded if the limitation is + * violated. This limit can be set smaller than the default or up the maximum + * launch depth of 24. When setting this limit, keep in mind that additional + * levels of sync depth require the runtime to reserve large amounts of + * device memory which can no longer be used for user allocations. If these + * reservations of device memory fail, ::cudaDeviceSetLimit will return + * ::cudaErrorMemoryAllocation, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability < 9.0. + * Attempting to set this limit on devices of other compute capability will + * results in error ::cudaErrorUnsupportedLimit being returned. + * + * - ::cudaLimitDevRuntimePendingLaunchCount controls the maximum number of + * outstanding device runtime launches that can be made from the current + * device. A grid is outstanding from the point of launch up until the grid + * is known to have been completed. Device runtime launches which violate + * this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when + * ::cudaGetLastError() is called after launch. If more pending launches than + * the default (2048 launches) are needed for a module using the device + * runtime, this limit can be increased. Keep in mind that being able to + * sustain additional pending launches will require the runtime to reserve + * larger amounts of device memory upfront which can no longer be used for + * allocations. If these reservations fail, ::cudaDeviceSetLimit will return + * ::cudaErrorMemoryAllocation, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability 3.5 and + * higher. Attempting to set this limit on devices of compute capability less + * than 3.5 will result in the error ::cudaErrorUnsupportedLimit being + * returned. + * + * - ::cudaLimitMaxL2FetchGranularity controls the L2 cache fetch granularity. + * Values can range from 0B to 128B. This is purely a performance hint and + * it can be ignored or clamped depending on the platform. + * + * - ::cudaLimitPersistingL2CacheSize controls size in bytes available + * for persisting L2 cache. This is purely a performance hint and it + * can be ignored or clamped depending on the platform. + * + * \param limit - Limit to set + * \param value - Size of limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceGetLimit, + * ::cuCtxSetLimit + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetLimit(enum cudaLimit limit, size_t value); + +/** + * \brief Return resource limits + * + * Returns in \p *pValue the current size of \p limit. The following ::cudaLimit values are supported. 
+ * - ::cudaLimitStackSize is the stack size in bytes of each GPU thread. + * - ::cudaLimitPrintfFifoSize is the size in bytes of the shared FIFO used by the + * ::printf() device system call. + * - ::cudaLimitMallocHeapSize is the size in bytes of the heap used by the + * ::malloc() and ::free() device system calls. + * - ::cudaLimitDevRuntimeSyncDepth is the maximum grid depth at which a + * thread can isssue the device runtime call ::cudaDeviceSynchronize() + * to wait on child grid launches to complete. This functionality is removed + * for devices of compute capability >= 9.0, and hence will return error + * ::cudaErrorUnsupportedLimit on such devices. + * - ::cudaLimitDevRuntimePendingLaunchCount is the maximum number of outstanding + * device runtime launches. + * - ::cudaLimitMaxL2FetchGranularity is the L2 cache fetch granularity. + * - ::cudaLimitPersistingL2CacheSize is the persisting L2 cache size in bytes. + * + * \param limit - Limit to query + * \param pValue - Returned size of the limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceSetLimit, + * ::cuCtxGetLimit + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit); + +/** + * \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given element size. + * + * Returns in \p maxWidthInElements the maximum number of elements allocatable in a 1D linear texture + * for given format descriptor \p fmtDesc. + * + * \param maxWidthInElements - Returns maximum number of texture elements allocatable for given \p fmtDesc. + * \param fmtDesc - Texture format description. + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cuDeviceGetTexture1DLinearMaxWidth + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetTexture1DLinearMaxWidth(size_t *maxWidthInElements, const struct cudaChannelFormatDesc *fmtDesc, int device); +#endif + +/** + * \brief Returns the preferred cache configuration for the current device. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this returns through \p pCacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute functions. + * + * This will return a \p pCacheConfig of ::cudaFuncCachePreferNone on devices + * where the size of the L1 cache and shared memory are fixed. 
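+ *
+ * For example, a host thread can query the current preference like this
+ * (a minimal sketch, with error checking omitted):
+ * \code
+ * enum cudaFuncCache cacheConfig;
+ * cudaDeviceGetCacheConfig(&cacheConfig);
+ *
+ * if (cacheConfig == cudaFuncCachePreferShared) {
+ *     // The device currently prefers a larger shared memory carveout.
+ * }
+ * \endcode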
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory + * + * \param pCacheConfig - Returned cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig, + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * ::cuCtxGetCacheConfig + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig); + +/** + * \brief Returns numerical values that correspond to the least and + * greatest stream priorities. + * + * Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond + * to the least and greatest stream priorities respectively. Stream priorities + * follow a convention where lower numbers imply greater priorities. The range of + * meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority]. + * If the user attempts to create a stream with a priority value that is + * outside the the meaningful range as specified by this API, the priority is + * automatically clamped down or up to either \p *leastPriority or \p *greatestPriority + * respectively. See ::cudaStreamCreateWithPriority for details on creating a + * priority stream. + * A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value + * is not desired. + * + * This function will return '0' in both \p *leastPriority and \p *greatestPriority if + * the current context's device does not support stream priorities + * (see ::cudaDeviceGetAttribute). + * + * \param leastPriority - Pointer to an int in which the numerical value for least + * stream priority is returned + * \param greatestPriority - Pointer to an int in which the numerical value for greatest + * stream priority is returned + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamGetPriority, + * ::cuCtxGetStreamPriorityRange + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetStreamPriorityRange(int *leastPriority, int *greatestPriority); + +/** + * \brief Sets the preferred cache configuration for the current device. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute the function. Any + * function preference set via + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)" + * or + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)" + * will be preferred over this device-wide setting. Setting the device-wide + * cache configuration to ::cudaFuncCachePreferNone will cause subsequent + * kernel launches to prefer to not change the cache configuration unless + * required to launch the kernel. 
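+ *
+ * (As an aside on ::cudaDeviceGetStreamPriorityRange documented above, a common
+ * pattern is to query the range and then create a stream at the greatest
+ * priority; the sketch below omits error checking.)
+ * \code
+ * int leastPriority, greatestPriority;
+ * cudaStream_t highPrioStream;
+ *
+ * cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority);
+ *
+ * // Lower numbers mean higher priority, so greatestPriority is the
+ * // numerically smallest value in the returned range.
+ * cudaStreamCreateWithPriority(&highPrioStream, cudaStreamNonBlocking, greatestPriority);
+ * \endcode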
+ * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. + * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory + * + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceGetCacheConfig, + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * ::cuCtxSetCacheConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetCacheConfig(enum cudaFuncCache cacheConfig); + +/** + * \brief Returns the shared memory configuration for the current device. + * + * This function will return in \p pConfig the current size of shared memory banks + * on the current device. On devices with configurable shared memory banks, + * ::cudaDeviceSetSharedMemConfig can be used to change this setting, so that all + * subsequent kernel launches will by default use the new bank size. When + * ::cudaDeviceGetSharedMemConfig is called on devices without configurable shared + * memory, it will return the fixed bank size of the hardware. + * + * The returned bank configurations can be either: + * - ::cudaSharedMemBankSizeFourByte - shared memory bank width is four bytes. + * - ::cudaSharedMemBankSizeEightByte - shared memory bank width is eight bytes. + * + * \param pConfig - Returned cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig, + * ::cudaDeviceGetCacheConfig, + * ::cudaDeviceSetSharedMemConfig, + * ::cudaFuncSetCacheConfig, + * ::cuCtxGetSharedMemConfig + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig); + +/** + * \brief Sets the shared memory configuration for the current device. + * + * On devices with configurable shared memory banks, this function will set + * the shared memory bank size which is used for all subsequent kernel launches. + * Any per-function setting of shared memory set via ::cudaFuncSetSharedMemConfig + * will override the device wide setting. + * + * Changing the shared memory configuration between launches may introduce + * a device side synchronization point. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * The supported bank configurations are: + * - ::cudaSharedMemBankSizeDefault: set bank width the device default (currently, + * four bytes) + * - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be four bytes + * natively. 
+ * - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be eight
+ * bytes natively.
+ *
+ * \param config - Requested cache configuration
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaDeviceSetCacheConfig,
+ * ::cudaDeviceGetCacheConfig,
+ * ::cudaDeviceGetSharedMemConfig,
+ * ::cudaFuncSetCacheConfig,
+ * ::cuCtxSetSharedMemConfig
+ */
+extern __host__ cudaError_t CUDARTAPI cudaDeviceSetSharedMemConfig(enum cudaSharedMemConfig config);
+
+/**
+ * \brief Returns a handle to a compute device
+ *
+ * Returns in \p *device a device ordinal given a PCI bus ID string.
+ *
+ * \param device - Returned device ordinal
+ *
+ * \param pciBusId - String in one of the following forms:
+ * [domain]:[bus]:[device].[function]
+ * [domain]:[bus]:[device]
+ * [bus]:[device].[function]
+ * where \p domain, \p bus, \p device, and \p function are all hexadecimal values
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidDevice
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaDeviceGetPCIBusId,
+ * ::cuDeviceGetByPCIBusId
+ */
+extern __host__ cudaError_t CUDARTAPI cudaDeviceGetByPCIBusId(int *device, const char *pciBusId);
+
+/**
+ * \brief Returns a PCI Bus Id string for the device
+ *
+ * Returns an ASCII string identifying the device \p device in the NULL-terminated
+ * string pointed to by \p pciBusId. \p len specifies the maximum length of the
+ * string that may be returned.
+ *
+ * \param pciBusId - Returned identifier string for the device in the following format
+ * [domain]:[bus]:[device].[function]
+ * where \p domain, \p bus, \p device, and \p function are all hexadecimal values.
+ * pciBusId should be large enough to store 13 characters including the NULL-terminator.
+ *
+ * \param len - Maximum length of string to store in \p pciBusId
+ *
+ * \param device - Device to get identifier string for
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidDevice
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaDeviceGetByPCIBusId,
+ * ::cuDeviceGetPCIBusId
+ */
+extern __host__ cudaError_t CUDARTAPI cudaDeviceGetPCIBusId(char *pciBusId, int len, int device);
+
+/**
+ * \brief Gets an interprocess handle for a previously allocated event
+ *
+ * Takes as input a previously allocated event. This event must have been
+ * created with the ::cudaEventInterprocess and ::cudaEventDisableTiming
+ * flags set. This opaque handle may be copied into other processes and
+ * opened with ::cudaIpcOpenEventHandle to allow efficient hardware
+ * synchronization between GPU work in different processes.
+ *
+ * After the event has been opened in the importing process,
+ * ::cudaEventRecord, ::cudaEventSynchronize, ::cudaStreamWaitEvent and
+ * ::cudaEventQuery may be used in either process. Performing operations
+ * on the imported event after the exported event has been freed
+ * with ::cudaEventDestroy will result in undefined behavior.
+ *
+ * IPC functionality is restricted to devices with support for unified
+ * addressing on Linux and Windows operating systems.
+ * IPC functionality on Windows is restricted to GPUs in TCC mode.
+ * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param handle - Pointer to a user allocated cudaIpcEventHandle + * in which to return the opaque event handle + * \param event - Event allocated with ::cudaEventInterprocess and + * ::cudaEventDisableTiming flags. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaEventCreate, + * ::cudaEventDestroy, + * ::cudaEventSynchronize, + * ::cudaEventQuery, + * ::cudaStreamWaitEvent, + * ::cudaIpcOpenEventHandle, + * ::cudaIpcGetMemHandle, + * ::cudaIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle, + * ::cuIpcGetEventHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcGetEventHandle(cudaIpcEventHandle_t *handle, cudaEvent_t event); + +/** + * \brief Opens an interprocess event handle for use in the current process + * + * Opens an interprocess event handle exported from another process with + * ::cudaIpcGetEventHandle. This function returns a ::cudaEvent_t that behaves like + * a locally created event with the ::cudaEventDisableTiming flag specified. + * This event must be freed with ::cudaEventDestroy. + * + * Performing operations on the imported event after the exported event has + * been freed with ::cudaEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. + * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param event - Returns the imported event + * \param handle - Interprocess handle to open + * + * \returns + * ::cudaSuccess, + * ::cudaErrorMapBufferObjectFailed, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue, + * ::cudaErrorDeviceUninitialized + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaEventCreate, + * ::cudaEventDestroy, + * ::cudaEventSynchronize, + * ::cudaEventQuery, + * ::cudaStreamWaitEvent, + * ::cudaIpcGetEventHandle, + * ::cudaIpcGetMemHandle, + * ::cudaIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle, + * ::cuIpcOpenEventHandle + */ +extern __host__ cudaError_t CUDARTAPI cudaIpcOpenEventHandle(cudaEvent_t *event, cudaIpcEventHandle_t handle); + +/** + * \brief Gets an interprocess memory handle for an existing device memory + * allocation + * + * Takes a pointer to the base of an existing device memory allocation created + * with ::cudaMalloc and exports it for use in another process. This is a + * lightweight operation and may be called multiple times on an allocation + * without adverse effects. + * + * If a region of memory is freed with ::cudaFree and a subsequent call + * to ::cudaMalloc returns memory with the same device address, + * ::cudaIpcGetMemHandle will return a unique handle for the + * new memory. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode. + * Users can test their device for IPC functionality by calling + * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport + * + * \param handle - Pointer to user allocated ::cudaIpcMemHandle to return + * the handle in. 
+ * \param devPtr - Base pointer to previously allocated device memory
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorMemoryAllocation,
+ * ::cudaErrorMapBufferObjectFailed,
+ * ::cudaErrorNotSupported,
+ * ::cudaErrorInvalidValue
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaMalloc,
+ * ::cudaFree,
+ * ::cudaIpcGetEventHandle,
+ * ::cudaIpcOpenEventHandle,
+ * ::cudaIpcOpenMemHandle,
+ * ::cudaIpcCloseMemHandle,
+ * ::cuIpcGetMemHandle
+ */
+extern __host__ cudaError_t CUDARTAPI cudaIpcGetMemHandle(cudaIpcMemHandle_t *handle, void *devPtr);
+
+/**
+ * \brief Opens an interprocess memory handle exported from another process
+ * and returns a device pointer usable in the local process.
+ *
+ * Maps memory exported from another process with ::cudaIpcGetMemHandle into
+ * the current device address space. For contexts on different devices
+ * ::cudaIpcOpenMemHandle can attempt to enable peer access between the
+ * devices as if the user called ::cudaDeviceEnablePeerAccess. This behavior is
+ * controlled by the ::cudaIpcMemLazyEnablePeerAccess flag.
+ * ::cudaDeviceCanAccessPeer can determine if a mapping is possible.
+ *
+ * ::cudaIpcOpenMemHandle can open handles to devices that may not be visible
+ * in the process calling the API.
+ *
+ * Contexts that may open ::cudaIpcMemHandles are restricted in the following way.
+ * ::cudaIpcMemHandles from each device in a given process may only be opened
+ * by one context per device per other process.
+ *
+ * If the memory handle has already been opened by the current context, the
+ * reference count on the handle is incremented by 1 and the existing device pointer
+ * is returned.
+ *
+ * Memory returned from ::cudaIpcOpenMemHandle must be freed with
+ * ::cudaIpcCloseMemHandle.
+ *
+ * Calling ::cudaFree on an exported memory region before calling
+ * ::cudaIpcCloseMemHandle in the importing context will result in undefined
+ * behavior.
+ *
+ * IPC functionality is restricted to devices with support for unified
+ * addressing on Linux and Windows operating systems.
+ * IPC functionality on Windows is restricted to GPUs in TCC mode.
+ * Users can test their device for IPC functionality by calling
+ * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport
+ *
+ * \param devPtr - Returned device pointer
+ * \param handle - ::cudaIpcMemHandle to open
+ * \param flags - Flags for this operation. Must be specified as ::cudaIpcMemLazyEnablePeerAccess
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorMapBufferObjectFailed,
+ * ::cudaErrorInvalidResourceHandle,
+ * ::cudaErrorDeviceUninitialized,
+ * ::cudaErrorTooManyPeers,
+ * ::cudaErrorNotSupported,
+ * ::cudaErrorInvalidValue
+ * \note_init_rt
+ * \note_callback
+ *
+ * \note No guarantees are made about the address returned in \p *devPtr.
+ * In particular, multiple processes may not receive the same address for the same \p handle.
+ *
+ * \sa
+ * ::cudaMalloc,
+ * ::cudaFree,
+ * ::cudaIpcGetEventHandle,
+ * ::cudaIpcOpenEventHandle,
+ * ::cudaIpcGetMemHandle,
+ * ::cudaIpcCloseMemHandle,
+ * ::cudaDeviceEnablePeerAccess,
+ * ::cudaDeviceCanAccessPeer,
+ * ::cuIpcOpenMemHandle
+ */
+extern __host__ cudaError_t CUDARTAPI cudaIpcOpenMemHandle(void **devPtr, cudaIpcMemHandle_t handle, unsigned int flags);
+
+/**
+ * \brief Attempts to close memory mapped with cudaIpcOpenMemHandle
+ *
+ * Decrements the reference count of the memory returned by ::cudaIpcOpenMemHandle by 1.
+ * When the reference count reaches 0, this API unmaps the memory. The original allocation
+ * in the exporting process as well as imported mappings in other processes
+ * will be unaffected.
+ *
+ * Any resources used to enable peer access will be freed if this is the
+ * last mapping using them.
+ *
+ * IPC functionality is restricted to devices with support for unified
+ * addressing on Linux and Windows operating systems.
+ * IPC functionality on Windows is restricted to GPUs in TCC mode.
+ * Users can test their device for IPC functionality by calling
+ * ::cudaDeviceGetAttribute with ::cudaDevAttrIpcEventSupport
+ *
+ * \param devPtr - Device pointer returned by ::cudaIpcOpenMemHandle
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorMapBufferObjectFailed,
+ * ::cudaErrorNotSupported,
+ * ::cudaErrorInvalidValue
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaMalloc,
+ * ::cudaFree,
+ * ::cudaIpcGetEventHandle,
+ * ::cudaIpcOpenEventHandle,
+ * ::cudaIpcGetMemHandle,
+ * ::cudaIpcOpenMemHandle,
+ * ::cuIpcCloseMemHandle
+ */
+extern __host__ cudaError_t CUDARTAPI cudaIpcCloseMemHandle(void *devPtr);
+
+/**
+ * \brief Blocks until remote writes are visible to the specified scope
+ *
+ * Blocks until remote writes to the target context via mappings created
+ * through GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see
+ * https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are
+ * visible to the specified scope.
+ *
+ * If the scope equals or lies within the scope indicated by
+ * ::cudaDevAttrGPUDirectRDMAWritesOrdering, the call will be a no-op and
+ * can be safely omitted for performance. This can be determined by
+ * comparing the numerical values between the two enums, with smaller
+ * scopes having smaller values.
+ *
+ * Users may query support for this API via ::cudaDevAttrGPUDirectRDMAFlushWritesOptions.
+ *
+ * \param target - The target of the operation, see cudaFlushGPUDirectRDMAWritesTarget
+ * \param scope - The scope of the operation, see cudaFlushGPUDirectRDMAWritesScope
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorNotSupported,
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cuFlushGPUDirectRDMAWrites
+ */
+#if __CUDART_API_VERSION >= 11030
+extern __host__ cudaError_t CUDARTAPI cudaDeviceFlushGPUDirectRDMAWrites(enum cudaFlushGPUDirectRDMAWritesTarget target, enum cudaFlushGPUDirectRDMAWritesScope scope);
+#endif
+
+/** @} */ /* END CUDART_DEVICE */
+
+/**
+ * \defgroup CUDART_THREAD_DEPRECATED Thread Management [DEPRECATED]
+ *
+ * ___MANBRIEF___ deprecated thread management functions of the CUDA runtime
+ * API (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes deprecated thread management functions of the CUDA runtime
+ * application programming interface.
+ *
+ * @{
+ */
+
+/**
+ * \brief Exit and clean up from CUDA launches
+ *
+ * \deprecated
+ *
+ * Note that this function is deprecated because its name does not
+ * reflect its behavior. Its functionality is identical to the
+ * non-deprecated function ::cudaDeviceReset(), which should be used
+ * instead.
+ *
+ * Explicitly destroys and cleans up all resources associated with the current
+ * device in the current process. Any subsequent API call to this device will
+ * reinitialize the device.
+ *
+ * Note that this function will reset the device immediately. It is the caller's
+ * responsibility to ensure that the device is not being accessed by any
+ * other host threads from the process when this function is called.
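+ *
+ * As a brief illustrative sketch (not part of the original documentation),
+ * new code would call the non-deprecated equivalent instead:
+ * \code
+ * // Hedged example: release this process's resources on the current device.
+ * cudaError_t err = cudaDeviceReset();
+ * \endcode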
+ * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceReset + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadExit(void); + +/** + * \brief Wait for compute device to finish + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is similar to the + * non-deprecated function ::cudaDeviceSynchronize(), which should be used + * instead. + * + * Blocks until the device has completed all preceding requested tasks. + * ::cudaThreadSynchronize() returns an error if one of the preceding tasks + * has failed. If the ::cudaDeviceScheduleBlockingSync flag was set for + * this device, the host thread will block until the device has finished + * its work. + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSynchronize + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadSynchronize(void); + +/** + * \brief Set resource limits + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceSetLimit(), which should be used + * instead. + * + * Setting \p limit to \p value is a request by the application to update + * the current limit maintained by the device. The driver is free to + * modify the requested value to meet h/w requirements (this could be + * clamping to minimum or maximum values, rounding up to nearest element + * size, etc). The application can use ::cudaThreadGetLimit() to find out + * exactly what the limit has been set to. + * + * Setting each ::cudaLimit has its own specific restrictions, so each is + * discussed here. + * + * - ::cudaLimitStackSize controls the stack size of each GPU thread. + * + * - ::cudaLimitPrintfFifoSize controls the size of the shared FIFO + * used by the ::printf() device system call. + * Setting ::cudaLimitPrintfFifoSize must be performed before + * launching any kernel that uses the ::printf() device + * system call, otherwise ::cudaErrorInvalidValue will be returned. + * + * - ::cudaLimitMallocHeapSize controls the size of the heap used + * by the ::malloc() and ::free() device system calls. Setting + * ::cudaLimitMallocHeapSize must be performed before launching + * any kernel that uses the ::malloc() or ::free() device system calls, + * otherwise ::cudaErrorInvalidValue will be returned. + * + * \param limit - Limit to set + * \param value - Size in bytes of limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetLimit + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadSetLimit(enum cudaLimit limit, size_t value); + +/** + * \brief Returns resource limits + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceGetLimit(), which should be used + * instead. + * + * Returns in \p *pValue the current size of \p limit. The supported + * ::cudaLimit values are: + * - ::cudaLimitStackSize: stack size of each GPU thread; + * - ::cudaLimitPrintfFifoSize: size of the shared FIFO used by the + * ::printf() device system call. 
+ * - ::cudaLimitMallocHeapSize: size of the heap used by the + * ::malloc() and ::free() device system calls; + * + * \param limit - Limit to query + * \param pValue - Returned size in bytes of limit + * + * \return + * ::cudaSuccess, + * ::cudaErrorUnsupportedLimit, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceGetLimit + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadGetLimit(size_t *pValue, enum cudaLimit limit); + +/** + * \brief Returns the preferred cache configuration for the current device. + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceGetCacheConfig(), which should be + * used instead. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this returns through \p pCacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute functions. + * + * This will return a \p pCacheConfig of ::cudaFuncCachePreferNone on devices + * where the size of the L1 cache and shared memory are fixed. + * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param pCacheConfig - Returned cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceGetCacheConfig + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadGetCacheConfig(enum cudaFuncCache *pCacheConfig); + +/** + * \brief Sets the preferred cache configuration for the current device. + * + * \deprecated + * + * Note that this function is deprecated because its name does not + * reflect its behavior. Its functionality is identical to the + * non-deprecated function ::cudaDeviceSetCacheConfig(), which should be + * used instead. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache + * configuration for the current device. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute the function. Any + * function preference set via + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)" + * or + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)" + * will be preferred over this device-wide setting. Setting the device-wide + * cache configuration to ::cudaFuncCachePreferNone will cause subsequent + * kernel launches to prefer to not change the cache configuration unless + * required to launch the kernel. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetCacheConfig + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaThreadSetCacheConfig(enum cudaFuncCache cacheConfig); + +/** @} */ /* END CUDART_THREAD_DEPRECATED */ + +/** + * \defgroup CUDART_ERROR Error Handling + * + * ___MANBRIEF___ error handling functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the error handling functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Returns the last error from a runtime call + * + * Returns the last error that has been produced by any of the runtime calls + * in the same instance of the CUDA Runtime library in the host thread and + * resets it to ::cudaSuccess. + * + * Note: Multiple instances of the CUDA Runtime library can be present in an + * application when using a library that statically links the CUDA Runtime. + * + * \return + * ::cudaSuccess, + * ::cudaErrorMissingConfiguration, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorInitializationError, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorUnmapBufferObjectFailed, + * ::cudaErrorInvalidDevicePointer, + * ::cudaErrorInvalidTexture, + * ::cudaErrorInvalidTextureBinding, + * ::cudaErrorInvalidChannelDescriptor, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorInvalidFilterSetting, + * ::cudaErrorInvalidNormSetting, + * ::cudaErrorUnknown, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInsufficientDriver, + * ::cudaErrorNoDevice, + * ::cudaErrorSetOnActiveProcess, + * ::cudaErrorStartupFailure, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaPeekAtLastError, ::cudaGetErrorName, ::cudaGetErrorString, ::cudaError + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void); + +/** + * \brief Returns the last error from a runtime call + * + * Returns the last error that has been produced by any of the runtime calls + * in the same instance of the CUDA Runtime library in the host thread. This + * call does not reset the error to ::cudaSuccess like ::cudaGetLastError(). + * + * Note: Multiple instances of the CUDA Runtime library can be present in an + * application when using a library that statically links the CUDA Runtime. 
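+ *
+ * As a brief illustrative sketch (not part of the original documentation),
+ * where myKernel, grid, block and data are hypothetical:
+ * \code
+ * myKernel<<<grid, block>>>(data);                  // asynchronous kernel launch
+ * cudaError_t launchErr = cudaPeekAtLastError();    // launch error, not cleared
+ * cudaError_t asyncErr  = cudaDeviceSynchronize();  // errors from kernel execution
+ * if (launchErr != cudaSuccess || asyncErr != cudaSuccess) {
+ *     // handle the error, e.g. via cudaGetErrorName()/cudaGetErrorString()
+ * }
+ * \endcode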
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorMissingConfiguration, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorInitializationError, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorUnmapBufferObjectFailed, + * ::cudaErrorInvalidDevicePointer, + * ::cudaErrorInvalidTexture, + * ::cudaErrorInvalidTextureBinding, + * ::cudaErrorInvalidChannelDescriptor, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorInvalidFilterSetting, + * ::cudaErrorInvalidNormSetting, + * ::cudaErrorUnknown, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInsufficientDriver, + * ::cudaErrorNoDevice, + * ::cudaErrorSetOnActiveProcess, + * ::cudaErrorStartupFailure, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetLastError, ::cudaGetErrorName, ::cudaGetErrorString, ::cudaError + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void); + +/** + * \brief Returns the string representation of an error code enum name + * + * Returns a string containing the name of an error code in the enum. If the error + * code is not recognized, "unrecognized error code" is returned. + * + * \param error - Error code to convert to string + * + * \return + * \p char* pointer to a NULL-terminated string + * + * \sa ::cudaGetErrorString, ::cudaGetLastError, ::cudaPeekAtLastError, ::cudaError, + * ::cuGetErrorName + */ +extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error); + +/** + * \brief Returns the description string for an error code + * + * Returns the description string for an error code. If the error + * code is not recognized, "unrecognized error code" is returned. + * + * \param error - Error code to convert to string + * + * \return + * \p char* pointer to a NULL-terminated string + * + * \sa ::cudaGetErrorName, ::cudaGetLastError, ::cudaPeekAtLastError, ::cudaError, + * ::cuGetErrorString + */ +extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error); +/** @} */ /* END CUDART_ERROR */ + +/** + * \addtogroup CUDART_DEVICE + * + * @{ + */ + +/** + * \brief Returns the number of compute-capable devices + * + * Returns in \p *count the number of devices with compute capability greater + * or equal to 2.0 that are available for execution. + * + * \param count - Returns the number of devices with compute capability + * greater or equal to 2.0 + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDevice, ::cudaSetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, + * ::cudaInitDevice, + * ::cuDeviceGetCount + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count); + +/** + * \brief Returns information about the compute-device + * + * Returns in \p *prop the properties of device \p dev. 
The ::cudaDeviceProp + * structure is defined as: + * \code + struct cudaDeviceProp { + char name[256]; + cudaUUID_t uuid; + size_t totalGlobalMem; + size_t sharedMemPerBlock; + int regsPerBlock; + int warpSize; + size_t memPitch; + int maxThreadsPerBlock; + int maxThreadsDim[3]; + int maxGridSize[3]; + int clockRate; + size_t totalConstMem; + int major; + int minor; + size_t textureAlignment; + size_t texturePitchAlignment; + int deviceOverlap; + int multiProcessorCount; + int kernelExecTimeoutEnabled; + int integrated; + int canMapHostMemory; + int computeMode; + int maxTexture1D; + int maxTexture1DMipmap; + int maxTexture1DLinear; + int maxTexture2D[2]; + int maxTexture2DMipmap[2]; + int maxTexture2DLinear[3]; + int maxTexture2DGather[2]; + int maxTexture3D[3]; + int maxTexture3DAlt[3]; + int maxTextureCubemap; + int maxTexture1DLayered[2]; + int maxTexture2DLayered[3]; + int maxTextureCubemapLayered[2]; + int maxSurface1D; + int maxSurface2D[2]; + int maxSurface3D[3]; + int maxSurface1DLayered[2]; + int maxSurface2DLayered[3]; + int maxSurfaceCubemap; + int maxSurfaceCubemapLayered[2]; + size_t surfaceAlignment; + int concurrentKernels; + int ECCEnabled; + int pciBusID; + int pciDeviceID; + int pciDomainID; + int tccDriver; + int asyncEngineCount; + int unifiedAddressing; + int memoryClockRate; + int memoryBusWidth; + int l2CacheSize; + int persistingL2CacheMaxSize; + int maxThreadsPerMultiProcessor; + int streamPrioritiesSupported; + int globalL1CacheSupported; + int localL1CacheSupported; + size_t sharedMemPerMultiprocessor; + int regsPerMultiprocessor; + int managedMemory; + int isMultiGpuBoard; + int multiGpuBoardGroupID; + int singleToDoublePrecisionPerfRatio; + int pageableMemoryAccess; + int concurrentManagedAccess; + int computePreemptionSupported; + int canUseHostPointerForRegisteredMem; + int cooperativeLaunch; + int cooperativeMultiDeviceLaunch; + int pageableMemoryAccessUsesHostPageTables; + int directManagedMemAccessFromHost; + int accessPolicyMaxWindowSize; + } + \endcode + * where: + * - \ref ::cudaDeviceProp::name "name[256]" is an ASCII string identifying + * the device. + * - \ref ::cudaDeviceProp::uuid "uuid" is a 16-byte unique identifier. + * - \ref ::cudaDeviceProp::totalGlobalMem "totalGlobalMem" is the total + * amount of global memory available on the device in bytes. + * - \ref ::cudaDeviceProp::sharedMemPerBlock "sharedMemPerBlock" is the + * maximum amount of shared memory available to a thread block in bytes. + * - \ref ::cudaDeviceProp::regsPerBlock "regsPerBlock" is the maximum number + * of 32-bit registers available to a thread block. + * - \ref ::cudaDeviceProp::warpSize "warpSize" is the warp size in threads. + * - \ref ::cudaDeviceProp::memPitch "memPitch" is the maximum pitch in + * bytes allowed by the memory copy functions that involve memory regions + * allocated through ::cudaMallocPitch(). + * - \ref ::cudaDeviceProp::maxThreadsPerBlock "maxThreadsPerBlock" is the + * maximum number of threads per block. + * - \ref ::cudaDeviceProp::maxThreadsDim "maxThreadsDim[3]" contains the + * maximum size of each dimension of a block. + * - \ref ::cudaDeviceProp::maxGridSize "maxGridSize[3]" contains the + * maximum size of each dimension of a grid. + * - \ref ::cudaDeviceProp::clockRate "clockRate" is the clock frequency in + * kilohertz. + * - \ref ::cudaDeviceProp::totalConstMem "totalConstMem" is the total amount + * of constant memory available on the device in bytes. 
+ * - \ref ::cudaDeviceProp::major "major", + * \ref ::cudaDeviceProp::minor "minor" are the major and minor revision + * numbers defining the device's compute capability. + * - \ref ::cudaDeviceProp::textureAlignment "textureAlignment" is the + * alignment requirement; texture base addresses that are aligned to + * \ref ::cudaDeviceProp::textureAlignment "textureAlignment" bytes do not + * need an offset applied to texture fetches. + * - \ref ::cudaDeviceProp::texturePitchAlignment "texturePitchAlignment" is the + * pitch alignment requirement for 2D texture references that are bound to + * pitched memory. + * - \ref ::cudaDeviceProp::deviceOverlap "deviceOverlap" is 1 if the device + * can concurrently copy memory between host and device while executing a + * kernel, or 0 if not. Deprecated, use instead asyncEngineCount. + * - \ref ::cudaDeviceProp::multiProcessorCount "multiProcessorCount" is the + * number of multiprocessors on the device. + * - \ref ::cudaDeviceProp::kernelExecTimeoutEnabled "kernelExecTimeoutEnabled" + * is 1 if there is a run time limit for kernels executed on the device, or + * 0 if not. + * - \ref ::cudaDeviceProp::integrated "integrated" is 1 if the device is an + * integrated (motherboard) GPU and 0 if it is a discrete (card) component. + * - \ref ::cudaDeviceProp::canMapHostMemory "canMapHostMemory" is 1 if the + * device can map host memory into the CUDA address space for use with + * ::cudaHostAlloc()/::cudaHostGetDevicePointer(), or 0 if not. + * - \ref ::cudaDeviceProp::computeMode "computeMode" is the compute mode + * that the device is currently in. Available modes are as follows: + * - cudaComputeModeDefault: Default mode - Device is not restricted and + * multiple threads can use ::cudaSetDevice() with this device. + * - cudaComputeModeProhibited: Compute-prohibited mode - No threads can use + * ::cudaSetDevice() with this device. + * - cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many + * threads in one process will be able to use ::cudaSetDevice() with this device. + *
When an occupied exclusive mode device is chosen with ::cudaSetDevice,
+ * all subsequent non-device management runtime functions will return
+ * ::cudaErrorDevicesUnavailable.
+ * - \ref ::cudaDeviceProp::maxTexture1D "maxTexture1D" is the maximum 1D
+ * texture size.
+ * - \ref ::cudaDeviceProp::maxTexture1DMipmap "maxTexture1DMipmap" is the maximum
+ * 1D mipmapped texture size.
+ * - \ref ::cudaDeviceProp::maxTexture1DLinear "maxTexture1DLinear" is the maximum
+ * 1D texture size for textures bound to linear memory.
+ * - \ref ::cudaDeviceProp::maxTexture2D "maxTexture2D[2]" contains the maximum
+ * 2D texture dimensions.
+ * - \ref ::cudaDeviceProp::maxTexture2DMipmap "maxTexture2DMipmap[2]" contains the
+ * maximum 2D mipmapped texture dimensions.
+ * - \ref ::cudaDeviceProp::maxTexture2DLinear "maxTexture2DLinear[3]" contains the
+ * maximum 2D texture dimensions for 2D textures bound to pitch linear memory.
+ * - \ref ::cudaDeviceProp::maxTexture2DGather "maxTexture2DGather[2]" contains the
+ * maximum 2D texture dimensions if texture gather operations have to be performed.
+ * - \ref ::cudaDeviceProp::maxTexture3D "maxTexture3D[3]" contains the maximum
+ * 3D texture dimensions.
+ * - \ref ::cudaDeviceProp::maxTexture3DAlt "maxTexture3DAlt[3]"
+ * contains the maximum alternate 3D texture dimensions.
+ * - \ref ::cudaDeviceProp::maxTextureCubemap "maxTextureCubemap" is the
+ * maximum cubemap texture width or height.
+ * - \ref ::cudaDeviceProp::maxTexture1DLayered "maxTexture1DLayered[2]" contains
+ * the maximum 1D layered texture dimensions.
+ * - \ref ::cudaDeviceProp::maxTexture2DLayered "maxTexture2DLayered[3]" contains
+ * the maximum 2D layered texture dimensions.
+ * - \ref ::cudaDeviceProp::maxTextureCubemapLayered "maxTextureCubemapLayered[2]"
+ * contains the maximum cubemap layered texture dimensions.
+ * - \ref ::cudaDeviceProp::maxSurface1D "maxSurface1D" is the maximum 1D
+ * surface size.
+ * - \ref ::cudaDeviceProp::maxSurface2D "maxSurface2D[2]" contains the maximum
+ * 2D surface dimensions.
+ * - \ref ::cudaDeviceProp::maxSurface3D "maxSurface3D[3]" contains the maximum
+ * 3D surface dimensions.
+ * - \ref ::cudaDeviceProp::maxSurface1DLayered "maxSurface1DLayered[2]" contains
+ * the maximum 1D layered surface dimensions.
+ * - \ref ::cudaDeviceProp::maxSurface2DLayered "maxSurface2DLayered[3]" contains
+ * the maximum 2D layered surface dimensions.
+ * - \ref ::cudaDeviceProp::maxSurfaceCubemap "maxSurfaceCubemap" is the maximum
+ * cubemap surface width or height.
+ * - \ref ::cudaDeviceProp::maxSurfaceCubemapLayered "maxSurfaceCubemapLayered[2]"
+ * contains the maximum cubemap layered surface dimensions.
+ * - \ref ::cudaDeviceProp::surfaceAlignment "surfaceAlignment" specifies the
+ * alignment requirements for surfaces.
+ * - \ref ::cudaDeviceProp::concurrentKernels "concurrentKernels" is 1 if the
+ * device supports executing multiple kernels within the same context
+ * simultaneously, or 0 if not. It is not guaranteed that multiple kernels
+ * will be resident on the device concurrently so this feature should not be
+ * relied upon for correctness.
+ * - \ref ::cudaDeviceProp::ECCEnabled "ECCEnabled" is 1 if the device has ECC
+ * support turned on, or 0 if not.
+ * - \ref ::cudaDeviceProp::pciBusID "pciBusID" is the PCI bus identifier of
+ * the device.
+ * - \ref ::cudaDeviceProp::pciDeviceID "pciDeviceID" is the PCI device
+ * (sometimes called slot) identifier of the device.
+ * - \ref ::cudaDeviceProp::pciDomainID "pciDomainID" is the PCI domain identifier + * of the device. + * - \ref ::cudaDeviceProp::tccDriver "tccDriver" is 1 if the device is using a + * TCC driver or 0 if not. + * - \ref ::cudaDeviceProp::asyncEngineCount "asyncEngineCount" is 1 when the + * device can concurrently copy memory between host and device while executing + * a kernel. It is 2 when the device can concurrently copy memory between host + * and device in both directions and execute a kernel at the same time. It is + * 0 if neither of these is supported. + * - \ref ::cudaDeviceProp::unifiedAddressing "unifiedAddressing" is 1 if the device + * shares a unified address space with the host and 0 otherwise. + * - \ref ::cudaDeviceProp::memoryClockRate "memoryClockRate" is the peak memory + * clock frequency in kilohertz. + * - \ref ::cudaDeviceProp::memoryBusWidth "memoryBusWidth" is the memory bus width + * in bits. + * - \ref ::cudaDeviceProp::l2CacheSize "l2CacheSize" is L2 cache size in bytes. + * - \ref ::cudaDeviceProp::persistingL2CacheMaxSize "persistingL2CacheMaxSize" is L2 cache's maximum persisting lines size in bytes. + * - \ref ::cudaDeviceProp::maxThreadsPerMultiProcessor "maxThreadsPerMultiProcessor" + * is the number of maximum resident threads per multiprocessor. + * - \ref ::cudaDeviceProp::streamPrioritiesSupported "streamPrioritiesSupported" + * is 1 if the device supports stream priorities, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::globalL1CacheSupported "globalL1CacheSupported" + * is 1 if the device supports caching of globals in L1 cache, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::localL1CacheSupported "localL1CacheSupported" + * is 1 if the device supports caching of locals in L1 cache, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::sharedMemPerMultiprocessor "sharedMemPerMultiprocessor" is the + * maximum amount of shared memory available to a multiprocessor in bytes; this amount is + * shared by all thread blocks simultaneously resident on a multiprocessor. + * - \ref ::cudaDeviceProp::regsPerMultiprocessor "regsPerMultiprocessor" is the maximum number + * of 32-bit registers available to a multiprocessor; this number is shared + * by all thread blocks simultaneously resident on a multiprocessor. + * - \ref ::cudaDeviceProp::managedMemory "managedMemory" + * is 1 if the device supports allocating managed memory on this system, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::isMultiGpuBoard "isMultiGpuBoard" + * is 1 if the device is on a multi-GPU board (e.g. Gemini cards), and 0 if not; + * - \ref ::cudaDeviceProp::multiGpuBoardGroupID "multiGpuBoardGroupID" is a unique identifier + * for a group of devices associated with the same board. + * Devices on the same multi-GPU board will share the same identifier. + * - \ref ::cudaDeviceProp::hostNativeAtomicSupported "hostNativeAtomicSupported" + * is 1 if the link between the device and the host supports native atomic operations, or 0 if it is not supported. + * - \ref ::cudaDeviceProp::singleToDoublePrecisionPerfRatio "singleToDoublePrecisionPerfRatio" + * is the ratio of single precision performance (in floating-point operations per second) + * to double precision performance. + * - \ref ::cudaDeviceProp::pageableMemoryAccess "pageableMemoryAccess" is 1 if the device supports + * coherently accessing pageable memory without calling cudaHostRegister on it, and 0 otherwise. 
+ * - \ref ::cudaDeviceProp::concurrentManagedAccess "concurrentManagedAccess" is 1 if the device can
+ * coherently access managed memory concurrently with the CPU, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::computePreemptionSupported "computePreemptionSupported" is 1 if the device
+ * supports Compute Preemption, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::canUseHostPointerForRegisteredMem "canUseHostPointerForRegisteredMem" is 1 if
+ * the device can access host registered memory at the same virtual address as the CPU, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::cooperativeLaunch "cooperativeLaunch" is 1 if the device supports launching
+ * cooperative kernels via ::cudaLaunchCooperativeKernel, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::cooperativeMultiDeviceLaunch "cooperativeMultiDeviceLaunch" is 1 if the device
+ * supports launching cooperative kernels via ::cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::sharedMemPerBlockOptin "sharedMemPerBlockOptin"
+ * is the per-device maximum shared memory per block usable by a special opt-in (see ::cudaFuncSetAttribute).
+ * - \ref ::cudaDeviceProp::pageableMemoryAccessUsesHostPageTables "pageableMemoryAccessUsesHostPageTables" is 1 if the device accesses
+ * pageable memory via the host's page tables, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::directManagedMemAccessFromHost "directManagedMemAccessFromHost" is 1 if the host can directly access managed
+ * memory on the device without migration, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::maxBlocksPerMultiProcessor "maxBlocksPerMultiProcessor" is the maximum number of thread blocks
+ * that can reside on a multiprocessor.
+ * - \ref ::cudaDeviceProp::accessPolicyMaxWindowSize "accessPolicyMaxWindowSize" is
+ * the maximum value of ::cudaAccessPolicyWindow::num_bytes.
+ * - \ref ::cudaDeviceProp::reservedSharedMemPerBlock "reservedSharedMemPerBlock"
+ * is the shared memory reserved by CUDA driver per block in bytes
+ * - \ref ::cudaDeviceProp::hostRegisterSupported "hostRegisterSupported"
+ * is 1 if the device supports host memory registration via ::cudaHostRegister, and 0 otherwise.
+ * - \ref ::cudaDeviceProp::sparseCudaArraySupported "sparseCudaArraySupported"
+ * is 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise
+ * - \ref ::cudaDeviceProp::hostRegisterReadOnlySupported "hostRegisterReadOnlySupported"
+ * is 1 if the device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as
+ * read-only to the GPU
+ * - \ref ::cudaDeviceProp::timelineSemaphoreInteropSupported "timelineSemaphoreInteropSupported"
+ * is 1 if external timeline semaphore interop is supported on the device, 0 otherwise
+ * - \ref ::cudaDeviceProp::memoryPoolsSupported "memoryPoolsSupported"
+ * is 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise
+ * - \ref ::cudaDeviceProp::gpuDirectRDMASupported "gpuDirectRDMASupported"
+ * is 1 if the device supports GPUDirect RDMA APIs, 0 otherwise
+ * - \ref ::cudaDeviceProp::gpuDirectRDMAFlushWritesOptions "gpuDirectRDMAFlushWritesOptions"
+ * is a bitmask to be interpreted according to the ::cudaFlushGPUDirectRDMAWritesOptions enum
+ * - \ref ::cudaDeviceProp::gpuDirectRDMAWritesOrdering "gpuDirectRDMAWritesOrdering"
+ * See the ::cudaGPUDirectRDMAWritesOrdering enum for numerical values
+ * - \ref ::cudaDeviceProp::memoryPoolSupportedHandleTypes "memoryPoolSupportedHandleTypes"
+ * is a bitmask of handle types supported with mempool-based IPC
+ * - \ref ::cudaDeviceProp::deferredMappingCudaArraySupported "deferredMappingCudaArraySupported"
+ * is 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
+ * - \ref ::cudaDeviceProp::ipcEventSupported "ipcEventSupported"
+ * is 1 if the device supports IPC Events, and 0 otherwise
+ * - \ref ::cudaDeviceProp::unifiedFunctionPointers "unifiedFunctionPointers"
+ * is 1 if the device supports unified pointers, and 0 otherwise
+ *
+ * \param prop - Properties for the specified device
+ * \param device - Device number to get properties for
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDevice
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, ::cudaChooseDevice,
+ * ::cudaDeviceGetAttribute,
+ * ::cudaInitDevice,
+ * ::cuDeviceGetAttribute,
+ * ::cuDeviceGetName
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceProperties(struct cudaDeviceProp *prop, int device);
+
+/**
+ * \brief Returns information about the device
+ *
+ * Returns in \p *value the integer value of the attribute \p attr on device
+ * \p device. 
The supported attributes are: + * - ::cudaDevAttrMaxThreadsPerBlock: Maximum number of threads per block + * - ::cudaDevAttrMaxBlockDimX: Maximum x-dimension of a block + * - ::cudaDevAttrMaxBlockDimY: Maximum y-dimension of a block + * - ::cudaDevAttrMaxBlockDimZ: Maximum z-dimension of a block + * - ::cudaDevAttrMaxGridDimX: Maximum x-dimension of a grid + * - ::cudaDevAttrMaxGridDimY: Maximum y-dimension of a grid + * - ::cudaDevAttrMaxGridDimZ: Maximum z-dimension of a grid + * - ::cudaDevAttrMaxSharedMemoryPerBlock: Maximum amount of shared memory + * available to a thread block in bytes + * - ::cudaDevAttrTotalConstantMemory: Memory available on device for + * __constant__ variables in a CUDA C kernel in bytes + * - ::cudaDevAttrWarpSize: Warp size in threads + * - ::cudaDevAttrMaxPitch: Maximum pitch in bytes allowed by the memory copy + * functions that involve memory regions allocated through ::cudaMallocPitch() + * - ::cudaDevAttrMaxTexture1DWidth: Maximum 1D texture width + * - ::cudaDevAttrMaxTexture1DLinearWidth: Maximum width for a 1D texture bound + * to linear memory + * - ::cudaDevAttrMaxTexture1DMipmappedWidth: Maximum mipmapped 1D texture width + * - ::cudaDevAttrMaxTexture2DWidth: Maximum 2D texture width + * - ::cudaDevAttrMaxTexture2DHeight: Maximum 2D texture height + * - ::cudaDevAttrMaxTexture2DLinearWidth: Maximum width for a 2D texture + * bound to linear memory + * - ::cudaDevAttrMaxTexture2DLinearHeight: Maximum height for a 2D texture + * bound to linear memory + * - ::cudaDevAttrMaxTexture2DLinearPitch: Maximum pitch in bytes for a 2D + * texture bound to linear memory + * - ::cudaDevAttrMaxTexture2DMipmappedWidth: Maximum mipmapped 2D texture + * width + * - ::cudaDevAttrMaxTexture2DMipmappedHeight: Maximum mipmapped 2D texture + * height + * - ::cudaDevAttrMaxTexture3DWidth: Maximum 3D texture width + * - ::cudaDevAttrMaxTexture3DHeight: Maximum 3D texture height + * - ::cudaDevAttrMaxTexture3DDepth: Maximum 3D texture depth + * - ::cudaDevAttrMaxTexture3DWidthAlt: Alternate maximum 3D texture width, + * 0 if no alternate maximum 3D texture size is supported + * - ::cudaDevAttrMaxTexture3DHeightAlt: Alternate maximum 3D texture height, + * 0 if no alternate maximum 3D texture size is supported + * - ::cudaDevAttrMaxTexture3DDepthAlt: Alternate maximum 3D texture depth, + * 0 if no alternate maximum 3D texture size is supported + * - ::cudaDevAttrMaxTextureCubemapWidth: Maximum cubemap texture width or + * height + * - ::cudaDevAttrMaxTexture1DLayeredWidth: Maximum 1D layered texture width + * - ::cudaDevAttrMaxTexture1DLayeredLayers: Maximum layers in a 1D layered + * texture + * - ::cudaDevAttrMaxTexture2DLayeredWidth: Maximum 2D layered texture width + * - ::cudaDevAttrMaxTexture2DLayeredHeight: Maximum 2D layered texture height + * - ::cudaDevAttrMaxTexture2DLayeredLayers: Maximum layers in a 2D layered + * texture + * - ::cudaDevAttrMaxTextureCubemapLayeredWidth: Maximum cubemap layered + * texture width or height + * - ::cudaDevAttrMaxTextureCubemapLayeredLayers: Maximum layers in a cubemap + * layered texture + * - ::cudaDevAttrMaxSurface1DWidth: Maximum 1D surface width + * - ::cudaDevAttrMaxSurface2DWidth: Maximum 2D surface width + * - ::cudaDevAttrMaxSurface2DHeight: Maximum 2D surface height + * - ::cudaDevAttrMaxSurface3DWidth: Maximum 3D surface width + * - ::cudaDevAttrMaxSurface3DHeight: Maximum 3D surface height + * - ::cudaDevAttrMaxSurface3DDepth: Maximum 3D surface depth + * - ::cudaDevAttrMaxSurface1DLayeredWidth: Maximum 1D layered 
surface width + * - ::cudaDevAttrMaxSurface1DLayeredLayers: Maximum layers in a 1D layered + * surface + * - ::cudaDevAttrMaxSurface2DLayeredWidth: Maximum 2D layered surface width + * - ::cudaDevAttrMaxSurface2DLayeredHeight: Maximum 2D layered surface height + * - ::cudaDevAttrMaxSurface2DLayeredLayers: Maximum layers in a 2D layered + * surface + * - ::cudaDevAttrMaxSurfaceCubemapWidth: Maximum cubemap surface width + * - ::cudaDevAttrMaxSurfaceCubemapLayeredWidth: Maximum cubemap layered + * surface width + * - ::cudaDevAttrMaxSurfaceCubemapLayeredLayers: Maximum layers in a cubemap + * layered surface + * - ::cudaDevAttrMaxRegistersPerBlock: Maximum number of 32-bit registers + * available to a thread block + * - ::cudaDevAttrClockRate: Peak clock frequency in kilohertz + * - ::cudaDevAttrTextureAlignment: Alignment requirement; texture base + * addresses aligned to ::textureAlign bytes do not need an offset applied + * to texture fetches + * - ::cudaDevAttrTexturePitchAlignment: Pitch alignment requirement for 2D + * texture references bound to pitched memory + * - ::cudaDevAttrGpuOverlap: 1 if the device can concurrently copy memory + * between host and device while executing a kernel, or 0 if not + * - ::cudaDevAttrMultiProcessorCount: Number of multiprocessors on the device + * - ::cudaDevAttrKernelExecTimeout: 1 if there is a run time limit for kernels + * executed on the device, or 0 if not + * - ::cudaDevAttrIntegrated: 1 if the device is integrated with the memory + * subsystem, or 0 if not + * - ::cudaDevAttrCanMapHostMemory: 1 if the device can map host memory into + * the CUDA address space, or 0 if not + * - ::cudaDevAttrComputeMode: Compute mode is the compute mode that the device + * is currently in. Available modes are as follows: + * - ::cudaComputeModeDefault: Default mode - Device is not restricted and + * multiple threads can use ::cudaSetDevice() with this device. + * - ::cudaComputeModeProhibited: Compute-prohibited mode - No threads can use + * ::cudaSetDevice() with this device. + * - ::cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many + * threads in one process will be able to use ::cudaSetDevice() with this + * device. + * - ::cudaDevAttrConcurrentKernels: 1 if the device supports executing + * multiple kernels within the same context simultaneously, or 0 if + * not. It is not guaranteed that multiple kernels will be resident on the + * device concurrently so this feature should not be relied upon for + * correctness. + * - ::cudaDevAttrEccEnabled: 1 if error correction is enabled on the device, + * 0 if error correction is disabled or not supported by the device + * - ::cudaDevAttrPciBusId: PCI bus identifier of the device + * - ::cudaDevAttrPciDeviceId: PCI device (also known as slot) identifier of + * the device + * - ::cudaDevAttrTccDriver: 1 if the device is using a TCC driver. TCC is only + * available on Tesla hardware running Windows Vista or later. + * - ::cudaDevAttrMemoryClockRate: Peak memory clock frequency in kilohertz + * - ::cudaDevAttrGlobalMemoryBusWidth: Global memory bus width in bits + * - ::cudaDevAttrL2CacheSize: Size of L2 cache in bytes. 0 if the device + * doesn't have L2 cache. 
+ * - ::cudaDevAttrMaxThreadsPerMultiProcessor: Maximum resident threads per + * multiprocessor + * - ::cudaDevAttrUnifiedAddressing: 1 if the device shares a unified address + * space with the host, or 0 if not + * - ::cudaDevAttrComputeCapabilityMajor: Major compute capability version + * number + * - ::cudaDevAttrComputeCapabilityMinor: Minor compute capability version + * number + * - ::cudaDevAttrStreamPrioritiesSupported: 1 if the device supports stream + * priorities, or 0 if not + * - ::cudaDevAttrGlobalL1CacheSupported: 1 if device supports caching globals + * in L1 cache, 0 if not + * - ::cudaDevAttrLocalL1CacheSupported: 1 if device supports caching locals + * in L1 cache, 0 if not + * - ::cudaDevAttrMaxSharedMemoryPerMultiprocessor: Maximum amount of shared memory + * available to a multiprocessor in bytes; this amount is shared by all + * thread blocks simultaneously resident on a multiprocessor + * - ::cudaDevAttrMaxRegistersPerMultiprocessor: Maximum number of 32-bit registers + * available to a multiprocessor; this number is shared by all thread blocks + * simultaneously resident on a multiprocessor + * - ::cudaDevAttrManagedMemory: 1 if device supports allocating + * managed memory, 0 if not + * - ::cudaDevAttrIsMultiGpuBoard: 1 if device is on a multi-GPU board, 0 if not + * - ::cudaDevAttrMultiGpuBoardGroupID: Unique identifier for a group of devices on the + * same multi-GPU board + * - ::cudaDevAttrHostNativeAtomicSupported: 1 if the link between the device and the + * host supports native atomic operations + * - ::cudaDevAttrSingleToDoublePrecisionPerfRatio: Ratio of single precision performance + * (in floating-point operations per second) to double precision performance + * - ::cudaDevAttrPageableMemoryAccess: 1 if the device supports coherently accessing + * pageable memory without calling cudaHostRegister on it, and 0 otherwise + * - ::cudaDevAttrConcurrentManagedAccess: 1 if the device can coherently access managed + * memory concurrently with the CPU, and 0 otherwise + * - ::cudaDevAttrComputePreemptionSupported: 1 if the device supports + * Compute Preemption, 0 if not + * - ::cudaDevAttrCanUseHostPointerForRegisteredMem: 1 if the device can access host + * registered memory at the same virtual address as the CPU, and 0 otherwise + * - ::cudaDevAttrCooperativeLaunch: 1 if the device supports launching cooperative kernels + * via ::cudaLaunchCooperativeKernel, and 0 otherwise + * - ::cudaDevAttrCooperativeMultiDeviceLaunch: 1 if the device supports launching cooperative + * kernels via ::cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise + * - ::cudaDevAttrCanFlushRemoteWrites: 1 if the device supports flushing of outstanding + * remote writes, and 0 otherwise + * - ::cudaDevAttrHostRegisterSupported: 1 if the device supports host memory registration + * via ::cudaHostRegister, and 0 otherwise + * - ::cudaDevAttrPageableMemoryAccessUsesHostPageTables: 1 if the device accesses pageable memory via the + * host's page tables, and 0 otherwise + * - ::cudaDevAttrDirectManagedMemAccessFromHost: 1 if the host can directly access managed memory on the device + * without migration, and 0 otherwise + * - ::cudaDevAttrMaxSharedMemoryPerBlockOptin: Maximum per block shared memory size on the device. 
This value can + * be opted into when using ::cudaFuncSetAttribute + * - ::cudaDevAttrMaxBlocksPerMultiprocessor: Maximum number of thread blocks that can reside on a multiprocessor + * - ::cudaDevAttrMaxPersistingL2CacheSize: Maximum L2 persisting lines capacity setting in bytes + * - ::cudaDevAttrMaxAccessPolicyWindowSize: Maximum value of cudaAccessPolicyWindow::num_bytes + * - ::cudaDevAttrReservedSharedMemoryPerBlock: Shared memory reserved by CUDA driver per block in bytes + * - ::cudaDevAttrSparseCudaArraySupported: 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays. + * - ::cudaDevAttrHostRegisterReadOnlySupported: Device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly + * to register memory that must be mapped as read-only to the GPU + * - ::cudaDevAttrMemoryPoolsSupported: 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, and 0 otherwise + * - ::cudaDevAttrGPUDirectRDMASupported: 1 if the device supports GPUDirect RDMA APIs, and 0 otherwise + * - ::cudaDevAttrGPUDirectRDMAFlushWritesOptions: bitmask to be interpreted according to the ::cudaFlushGPUDirectRDMAWritesOptions enum + * - ::cudaDevAttrGPUDirectRDMAWritesOrdering: see the ::cudaGPUDirectRDMAWritesOrdering enum for numerical values + * - ::cudaDevAttrMemoryPoolSupportedHandleTypes: Bitmask of handle types supported with mempool based IPC + * - ::cudaDevAttrDeferredMappingCudaArraySupported : 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays. + * - ::cudaDevAttrIpcEventSupport: 1 if the device supports IPC Events. + * + * \param value - Returned device attribute value + * \param attr - Device attribute to query + * \param device - Device number to query + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, ::cudaChooseDevice, + * ::cudaGetDeviceProperties, + * ::cudaInitDevice, + * ::cuDeviceGetAttribute + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device); + +/** + * \brief Returns the default mempool of a device + * + * The default mempool of a device contains device memory from that device. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * ::cudaErrorNotSupported + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cuDeviceGetDefaultMemPool, ::cudaMallocAsync, ::cudaMemPoolTrimTo, ::cudaMemPoolGetAttribute, ::cudaDeviceSetMemPool, ::cudaMemPoolSetAttribute, ::cudaMemPoolSetAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetDefaultMemPool(cudaMemPool_t *memPool, int device); + + +/** + * \brief Sets the current memory pool of a device + * + * The memory pool must be local to the specified device. + * Unless a mempool is specified in the ::cudaMallocAsync call, + * ::cudaMallocAsync allocates from the current mempool of the provided stream's device. + * By default, a device's current memory pool is its default memory pool. + * + * \note Use ::cudaMallocFromPoolAsync to specify asynchronous allocations from a device different + * than the one the stream runs on. 
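+ *
+ * As a brief illustrative sketch (not part of the original documentation),
+ * assuming device 0 and an already created stream:
+ * \code
+ * cudaMemPool_t pool;
+ * cudaDeviceGetDefaultMemPool(&pool, 0);    // default pool of device 0
+ * cudaDeviceSetMemPool(0, pool);            // make it device 0's current pool
+ * void *ptr;
+ * cudaMallocAsync(&ptr, 1 << 20, stream);   // served from device 0's current pool
+ * cudaFreeAsync(ptr, stream);
+ * \endcode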
+ * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorInvalidDevice + * ::cudaErrorNotSupported + * \notefnerr + * \note_callback + * + * \sa ::cuDeviceSetMemPool, ::cudaDeviceGetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaMemPoolCreate, ::cudaMemPoolDestroy, ::cudaMallocFromPoolAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetMemPool(int device, cudaMemPool_t memPool); + +/** + * \brief Gets the current mempool for a device + * + * Returns the last pool provided to ::cudaDeviceSetMemPool for this device + * or the device's default memory pool if ::cudaDeviceSetMemPool has never been called. + * By default the current mempool is the default mempool for a device, + * otherwise the returned pool must have been set with ::cuDeviceSetMemPool or ::cudaDeviceSetMemPool. + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorNotSupported + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cuDeviceGetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceSetMemPool + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetMemPool(cudaMemPool_t *memPool, int device); + +/** + * \brief Return NvSciSync attributes that this device can support. + * + * Returns in \p nvSciSyncAttrList, the properties of NvSciSync that + * this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList + * can be used to create an NvSciSync that matches this device's capabilities. + * + * If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is + * already set this API will return ::cudaErrorInvalidValue. + * + * The applications should set \p nvSciSyncAttrList to a valid + * NvSciSyncAttrList failing which this API will return + * ::cudaErrorInvalidHandle. + * + * The \p flags controls how applications intends to use + * the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are: + * - ::cudaNvSciSyncAttrSignal, specifies that the applications intends to + * signal an NvSciSync on this CUDA device. + * - ::cudaNvSciSyncAttrWait, specifies that the applications intends to + * wait on an NvSciSync on this CUDA device. + * + * At least one of these flags must be set, failing which the API + * returns ::cudaErrorInvalidValue. Both the flags are orthogonal + * to one another: a developer may set both these flags that allows to + * set both wait and signal specific attributes in the same \p nvSciSyncAttrList. + * + * Note that this API updates the input \p nvSciSyncAttrList with values equivalent + * to the following public attribute key-values: + * NvSciSyncAttrKey_RequiredPerm is set to + * - NvSciSyncAccessPerm_SignalOnly if ::cudaNvSciSyncAttrSignal is set in \p flags. + * - NvSciSyncAccessPerm_WaitOnly if ::cudaNvSciSyncAttrWait is set in \p flags. + * - NvSciSyncAccessPerm_WaitSignal if both ::cudaNvSciSyncAttrWait and + * ::cudaNvSciSyncAttrSignal are set in \p flags. + * NvSciSyncAttrKey_PrimitiveInfo is set to + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid \p device. + * - NvSciSyncAttrValPrimitiveType_Syncpoint if \p device is a Tegra device. + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if \p device is GA10X+. + * NvSciSyncAttrKey_GpuId is set to the same UUID that is returned in + * \p cudaDeviceProp.uuid from ::cudaDeviceGetProperties for this \p device. + * + * \param nvSciSyncAttrList - Return NvSciSync attributes supported. + * \param device - Valid Cuda Device to get NvSciSync attributes for. + * \param flags - flags describing NvSciSync usage. 
+ * + * \return + * + * ::cudaSuccess, + * ::cudaErrorDeviceUninitialized, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidHandle, + * ::cudaErrorInvalidDevice, + * ::cudaErrorNotSupported, + * ::cudaErrorMemoryAllocation + * + * \sa + * ::cudaImportExternalSemaphore, + * ::cudaDestroyExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetNvSciSyncAttributes(void *nvSciSyncAttrList, int device, int flags); + +/** + * \brief Queries attributes of the link between two devices. + * + * Returns in \p *value the value of the requested attribute \p attrib of the + * link between \p srcDevice and \p dstDevice. The supported attributes are: + * - ::cudaDevP2PAttrPerformanceRank: A relative value indicating the + * performance of the link between two devices. Lower value means better + * performance (0 being the value used for most performant link). + * - ::cudaDevP2PAttrAccessSupported: 1 if peer access is enabled. + * - ::cudaDevP2PAttrNativeAtomicSupported: 1 if native atomic operations over + * the link are supported. + * - ::cudaDevP2PAttrCudaArrayAccessSupported: 1 if accessing CUDA arrays over + * the link is supported. + * + * Returns ::cudaErrorInvalidDevice if \p srcDevice or \p dstDevice are not valid + * or if they represent the same device. + * + * Returns ::cudaErrorInvalidValue if \p attrib is not valid or if \p value is + * a null pointer. + * + * \param value - Returned value of the requested attribute + * \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice. + * \param srcDevice - The source device of the target link. + * \param dstDevice - The destination device of the target link. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceEnablePeerAccess, + * ::cudaDeviceDisablePeerAccess, + * ::cudaDeviceCanAccessPeer, + * ::cuDeviceGetP2PAttribute + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetP2PAttribute(int *value, enum cudaDeviceP2PAttr attr, int srcDevice, int dstDevice); + +/** + * \brief Select compute-device which best matches criteria + * + * Returns in \p *device the device which has properties that best match + * \p *prop. + * + * \param device - Device with best match + * \param prop - Desired device properties + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, + * ::cudaGetDeviceProperties, + * ::cudaInitDevice + */ +extern __host__ cudaError_t CUDARTAPI cudaChooseDevice(int *device, const struct cudaDeviceProp *prop); +/** + * \brief Initialize device to be used for GPU executions + * + * This function will initialize the CUDA Runtime structures and primary context on \p device when called, + * but the context will not be made current to \p device. + * + * When ::cudaInitDeviceFlagsAreValid is set in \p flags, deviceFlags are applied to the requested device. + * The values of deviceFlags match those of the flags parameters in ::cudaSetDeviceFlags. + * The effect may be verified by ::cudaGetDeviceFlags. + * + * This function will return an error if the device is in ::cudaComputeModeExclusiveProcess + * and is occupied by another process or if the device is in ::cudaComputeModeProhibited. 
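+ *
+ * For illustration only (assumes at least one device is present; a \p flags value
+ * of 0 leaves the device flags untouched), eager initialization might look like:
+ * \code
+ int count = 0;
+ cudaGetDeviceCount(&count);
+ if (count > 0) {
+     cudaInitDevice(0, 0, 0);   // create runtime state and the primary context on device 0
+     // ... later, cudaSetDevice(0) makes device 0 current for this thread ...
+ }
+ * \endcode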
+ * + * \param device - Device on which the runtime will initialize itself. + * \param deviceFlags - Parameters for device operation. + * \param flags - Flags for controlling the device initialization. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, ::cudaSetDevice + * ::cuCtxSetCurrent + */ +extern __host__ cudaError_t CUDARTAPI cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags); +/** + * \brief Set device to be used for GPU executions + * + * Sets \p device as the current device for the calling host thread. + * Valid device id's are 0 to (::cudaGetDeviceCount() - 1). + * + * Any device memory subsequently allocated from this host thread + * using ::cudaMalloc(), ::cudaMallocPitch() or ::cudaMallocArray() + * will be physically resident on \p device. Any host memory allocated + * from this host thread using ::cudaMallocHost() or ::cudaHostAlloc() + * or ::cudaHostRegister() will have its lifetime associated with + * \p device. Any streams or events created from this host thread will + * be associated with \p device. Any kernels launched from this host + * thread using the <<<>>> operator or ::cudaLaunchKernel() will be executed + * on \p device. + * + * This call may be made from any host thread, to any device, and at + * any time. This function will do no synchronization with the previous + * or new device, + * and should only take significant time when it initializes the runtime's context state. + * This call will bind the primary context of the specified device to the calling thread and all the + * subsequent memory allocations, stream and event creations, and kernel launches + * will be associated with the primary context. + * This function will also immediately initialize the runtime state on the primary context, + * and the context will be current on \p device immediately. This function will return an + * error if the device is in ::cudaComputeModeExclusiveProcess and is occupied by another + * process or if the device is in ::cudaComputeModeProhibited. + * + * It is not required to call ::cudaInitDevice before using this function. + * \param device - Device on which the active host thread should execute the + * device code. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorDeviceUnavailable, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, + * ::cudaInitDevice, + * ::cuCtxSetCurrent + */ +extern __host__ cudaError_t CUDARTAPI cudaSetDevice(int device); + +/** + * \brief Returns which device is currently being used + * + * Returns in \p *device the current device for the calling host thread. + * + * \param device - Returns the device on which the active host thread + * executes the device code. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorDeviceUnavailable, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaSetDevice, ::cudaGetDeviceProperties, + * ::cudaChooseDevice, + * ::cuCtxGetCurrent + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device); + +/** + * \brief Set a list of devices that can be used for CUDA + * + * Sets a list of devices for CUDA execution in priority order using + * \p device_arr. 
The parameter \p len specifies the number of elements in the + * list. CUDA will try devices from the list sequentially until it finds one + * that works. If this function is not called, or if it is called with a \p len + * of 0, then CUDA will go back to its default behavior of trying devices + * sequentially from a default list containing all of the available CUDA + * devices in the system. If a specified device ID in the list does not exist, + * this function will return ::cudaErrorInvalidDevice. If \p len is not 0 and + * \p device_arr is NULL or if \p len exceeds the number of devices in + * the system, then ::cudaErrorInvalidValue is returned. + * + * \param device_arr - List of devices to try + * \param len - Number of devices in specified list + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaSetDevice, ::cudaGetDeviceProperties, + * ::cudaSetDeviceFlags, + * ::cudaChooseDevice + */ +extern __host__ cudaError_t CUDARTAPI cudaSetValidDevices(int *device_arr, int len); + +/** + * \brief Sets flags to be used for device executions + * + * Records \p flags as the flags for the current device. If the current device + * has been set and that device has already been initialized, the previous flags + * are overwritten. If the current device has not been initialized, it is + * initialized with the provided flags. If no device has been made current to + * the calling thread, a default device is selected and initialized with the + * provided flags. + * + * The two LSBs of the \p flags parameter can be used to control how the CPU + * thread interacts with the OS scheduler when waiting for results from the + * device. + * + * - ::cudaDeviceScheduleAuto: The default value if the \p flags parameter is + * zero, uses a heuristic based on the number of active CUDA contexts in the + * process \p C and the number of logical processors in the system \p P. If + * \p C \> \p P, then CUDA will yield to other OS threads when waiting for the + * device, otherwise CUDA will not yield while waiting for results and + * actively spin on the processor. Additionally, on Tegra devices, + * ::cudaDeviceScheduleAuto uses a heuristic based on the power profile of + * the platform and may choose ::cudaDeviceScheduleBlockingSync for low-powered + * devices. + * - ::cudaDeviceScheduleSpin: Instruct CUDA to actively spin when waiting for + * results from the device. This can decrease latency when waiting for the + * device, but may lower the performance of CPU threads if they are performing + * work in parallel with the CUDA thread. + * - ::cudaDeviceScheduleYield: Instruct CUDA to yield its thread when waiting + * for results from the device. This can increase latency when waiting for the + * device, but can increase the performance of CPU threads performing work in + * parallel with the device. + * - ::cudaDeviceScheduleBlockingSync: Instruct CUDA to block the CPU thread + * on a synchronization primitive when waiting for the device to finish work. + * - ::cudaDeviceBlockingSync: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the device to finish work.
+ * \ref deprecated "Deprecated:" This flag was deprecated as of CUDA 4.0 and + * replaced with ::cudaDeviceScheduleBlockingSync. + * - ::cudaDeviceMapHost: This flag enables allocating pinned + * host memory that is accessible to the device. It is implicit for the + * runtime but may be absent if a context is created using the driver API. + * If this flag is not set, ::cudaHostGetDevicePointer() will always return + * a failure code. + * - ::cudaDeviceLmemResizeToMax: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * \ref deprecated "Deprecated:" This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * + * \param flags - Parameters for device operation + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceFlags, ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaSetDevice, ::cudaSetValidDevices, + * ::cudaInitDevice, + * ::cudaChooseDevice, + * ::cuDevicePrimaryCtxSetFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaSetDeviceFlags( unsigned int flags ); + +/** + * \brief Gets the flags for the current device + * + * + * Returns in \p flags the flags for the current device. If there is a current + * device for the calling thread, the flags for the device are returned. If + * there is no current device, the flags for the first device are returned, + * which may be the default flags. Compare to the behavior of + * ::cudaSetDeviceFlags. + * + * Typically, the flags returned should match the behavior that will be seen + * if the calling thread uses a device after this call, without any change to + * the flags or current device inbetween by this or another thread. Note that + * if the device is not initialized, it is possible for another thread to + * change the flags for the current device before it is initialized. + * Additionally, when using exclusive mode, if this thread has not requested a + * specific device, it may use a device other than the first device, contrary + * to the assumption made by this function. + * + * If a context has been created via the driver API and is current to the + * calling thread, the flags for that context are always returned. + * + * Flags returned by this function may specifically include ::cudaDeviceMapHost + * even though it is not accepted by ::cudaSetDeviceFlags because it is + * implicit in runtime API flags. The reason for this is that the current + * context may have been created via the driver API in which case the flag is + * not implicit and may be unset. + * + * \param flags - Pointer to store the device flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDevice, ::cudaGetDeviceProperties, + * ::cudaSetDevice, ::cudaSetDeviceFlags, + * ::cudaInitDevice, + * ::cuCtxGetFlags, + * ::cuDevicePrimaryCtxGetState + */ +extern __host__ cudaError_t CUDARTAPI cudaGetDeviceFlags( unsigned int *flags ); +/** @} */ /* END CUDART_DEVICE */ + +/** + * \defgroup CUDART_STREAM Stream Management + * + * ___MANBRIEF___ stream management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Create an asynchronous stream + * + * Creates a new asynchronous stream. 
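+ *
+ * A typical pattern (sketch; error checking omitted) is to create a stream, issue
+ * asynchronous work into it, and destroy it when it is no longer needed:
+ * \code
+ cudaStream_t stream;
+ cudaStreamCreate(&stream);
+ // ... enqueue work, e.g. cudaMemcpyAsync(..., stream) or kernel launches ...
+ cudaStreamSynchronize(stream);   // wait for the enqueued work to finish
+ cudaStreamDestroy(stream);
+ * \endcode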
+ * + * \param pStream - Pointer to new stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamGetPriority, + * ::cudaStreamGetFlags, + * ::cudaStreamQuery, + * ::cudaStreamSynchronize, + * ::cudaStreamWaitEvent, + * ::cudaStreamAddCallback, + * ::cudaStreamDestroy, + * ::cuStreamCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamCreate(cudaStream_t *pStream); + +/** + * \brief Create an asynchronous stream + * + * Creates a new asynchronous stream. The \p flags argument determines the + * behaviors of the stream. Valid values for \p flags are + * - ::cudaStreamDefault: Default stream creation flag. + * - ::cudaStreamNonBlocking: Specifies that work running in the created + * stream may run concurrently with work in stream 0 (the NULL stream), and that + * the created stream should perform no implicit synchronization with stream 0. + * + * \param pStream - Pointer to new stream identifier + * \param flags - Parameters for stream creation + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, + * ::cudaStreamCreateWithPriority, + * ::cudaStreamGetFlags, + * ::cudaStreamQuery, + * ::cudaStreamSynchronize, + * ::cudaStreamWaitEvent, + * ::cudaStreamAddCallback, + * ::cudaStreamDestroy, + * ::cuStreamCreate + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags); + +/** + * \brief Create an asynchronous stream with the specified priority + * + * Creates a stream with the specified priority and returns a handle in \p pStream. + * This API alters the scheduler priority of work in the stream. Work in a higher + * priority stream may preempt work already executing in a low priority stream. + * + * \p priority follows a convention where lower numbers represent higher priorities. + * '0' represents default priority. The range of meaningful numerical priorities can + * be queried using ::cudaDeviceGetStreamPriorityRange. If the specified priority is + * outside the numerical range returned by ::cudaDeviceGetStreamPriorityRange, + * it will automatically be clamped to the lowest or the highest number in the range. + * + * \param pStream - Pointer to new stream identifier + * \param flags - Flags for stream creation. See ::cudaStreamCreateWithFlags for a list of valid flags that can be passed + * \param priority - Priority of the stream. Lower numbers represent higher priorities. + * See ::cudaDeviceGetStreamPriorityRange for more information about + * the meaningful stream priorities that can be passed. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \note Stream priorities are supported only on GPUs + * with compute capability 3.5 or higher. + * + * \note In the current implementation, only compute kernels launched in + * priority streams are affected by the stream's priority. Stream priorities have + * no effect on host-to-device and device-to-host memory operations. 
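+ *
+ * As a sketch (error checking omitted), a stream at the highest meaningful priority
+ * for the current device could be created as follows:
+ * \code
+ int leastPriority, greatestPriority;
+ cudaStream_t highPrioStream;
+ cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority);
+ cudaStreamCreateWithPriority(&highPrioStream, cudaStreamNonBlocking, greatestPriority);
+ // ... launch latency-sensitive kernels into highPrioStream ...
+ cudaStreamDestroy(highPrioStream);
+ * \endcode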
+ *
+ * \sa ::cudaStreamCreate,
+ * ::cudaStreamCreateWithFlags,
+ * ::cudaDeviceGetStreamPriorityRange,
+ * ::cudaStreamGetPriority,
+ * ::cudaStreamQuery,
+ * ::cudaStreamWaitEvent,
+ * ::cudaStreamAddCallback,
+ * ::cudaStreamSynchronize,
+ * ::cudaStreamDestroy,
+ * ::cuStreamCreateWithPriority
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithPriority(cudaStream_t *pStream, unsigned int flags, int priority);
+
+/**
+ * \brief Query the priority of a stream
+ *
+ * Query the priority of a stream. The priority is returned in \p priority.
+ * Note that if the stream was created with a priority outside the meaningful
+ * numerical range returned by ::cudaDeviceGetStreamPriorityRange,
+ * this function returns the clamped priority.
+ * See ::cudaStreamCreateWithPriority for details about priority clamping.
+ *
+ * \param hStream - Handle to the stream to be queried
+ * \param priority - Pointer to a signed integer in which the stream's priority is returned
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidResourceHandle
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaStreamCreateWithPriority,
+ * ::cudaDeviceGetStreamPriorityRange,
+ * ::cudaStreamGetFlags,
+ * ::cuStreamGetPriority
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetPriority(cudaStream_t hStream, int *priority);
+
+/**
+ * \brief Query the flags of a stream
+ *
+ * Query the flags of a stream. The flags are returned in \p flags.
+ * See ::cudaStreamCreateWithFlags for a list of valid flags.
+ *
+ * \param hStream - Handle to the stream to be queried
+ * \param flags - Pointer to an unsigned integer in which the stream's flags are returned
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidResourceHandle
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaStreamCreateWithPriority,
+ * ::cudaStreamCreateWithFlags,
+ * ::cudaStreamGetPriority,
+ * ::cuStreamGetFlags
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetFlags(cudaStream_t hStream, unsigned int *flags);
+
+/**
+ * \brief Query the Id of a stream
+ *
+ * Query the Id of a stream. The Id is returned in \p streamId.
+ * The Id is unique for the life of the program.
+ *
+ * The stream handle \p hStream can refer to any of the following:
+ *
+ * - a stream created via any of the CUDA runtime APIs such as ::cudaStreamCreate,
+ *   ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority, or their driver
+ *   API equivalents such as ::cuStreamCreate or ::cuStreamCreateWithPriority.
+ *   Passing an invalid handle will result in undefined behavior.
+ * - any of the special streams such as the NULL stream, ::cudaStreamLegacy
+ *   and ::cudaStreamPerThread respectively. The driver API equivalents of these
+ *   are also accepted which are NULL, ::CU_STREAM_LEGACY and ::CU_STREAM_PER_THREAD.
+ * + * \param hStream - Handle to the stream to be queried + * \param streamId - Pointer to an unsigned long long in which the stream Id is returned + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreateWithPriority, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamGetPriority, + * ::cudaStreamGetFlags, + * ::cuStreamGetId + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetId(cudaStream_t hStream, unsigned long long *streamId); + +/** + * \brief Resets all persisting lines in cache to normal status. + * + * Resets all persisting lines in cache to normal status. + * Takes effect on function return. + * + * \return + * ::cudaSuccess, + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaCtxResetPersistingL2Cache(void); + +/** + * \brief Copies attributes from source stream to destination stream. + * + * Copies attributes from source stream \p src to destination stream \p dst. + * Both streams must have the same context. + * + * \param[out] dst Destination stream + * \param[in] src Source stream + * For attributes see ::cudaStreamAttrID + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotSupported + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCopyAttributes(cudaStream_t dst, cudaStream_t src); + + /** + * \brief Queries stream attribute. + * + * Queries attribute \p attr from \p hStream and stores it in corresponding + * member of \p value_out. + * + * \param[in] hStream + * \param[in] attr + * \param[out] value_out + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetAttribute( + cudaStream_t hStream, cudaStreamAttrID attr, + cudaStreamAttrValue *value_out); + + /** + * \brief Sets stream attribute. + * + * Sets attribute \p attr on \p hStream from corresponding attribute of + * \p value. The updated attribute will be applied to subsequent work + * submitted to the stream. It will not affect previously submitted work. + * + * \param[out] hStream + * \param[in] attr + * \param[in] value + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamSetAttribute( + cudaStream_t hStream, cudaStreamAttrID attr, + const cudaStreamAttrValue *value); + + /** + * \brief Destroys and cleans up an asynchronous stream + * + * Destroys and cleans up the asynchronous stream specified by \p stream. + * + * In case the device is still doing work in the stream \p stream + * when ::cudaStreamDestroy() is called, the function will return immediately + * and the resources associated with \p stream will be released automatically + * once the device has completed all work in \p stream. 
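+ *
+ * For example (sketch only; error checking omitted), a stream may be destroyed as
+ * soon as its last operation has been enqueued:
+ * \code
+ char hostSrc[256];
+ void *devDst;
+ cudaStream_t stream;
+ cudaMalloc(&devDst, sizeof(hostSrc));
+ cudaStreamCreate(&stream);
+ cudaMemcpyAsync(devDst, hostSrc, sizeof(hostSrc), cudaMemcpyHostToDevice, stream);
+ cudaStreamDestroy(stream);    // returns immediately; the copy still runs to completion
+ cudaDeviceSynchronize();      // wait for outstanding work before freeing the buffer
+ cudaFree(devDst);
+ * \endcode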
+ * + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags, + * ::cudaStreamQuery, + * ::cudaStreamWaitEvent, + * ::cudaStreamSynchronize, + * ::cudaStreamAddCallback, + * ::cuStreamDestroy + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream); + +/** + * \brief Make a compute stream wait on an event + * + * Makes all future work submitted to \p stream wait for all work captured in + * \p event. See ::cudaEventRecord() for details on what is captured by an event. + * The synchronization will be performed efficiently on the device when applicable. + * \p event may be from a different device than \p stream. + * + * flags include: + * - ::cudaEventWaitDefault: Default event creation flag. + * - ::cudaEventWaitExternal: Event is captured in the graph as an external + * event node when performing stream capture. + * + * \param stream - Stream to wait + * \param event - Event to wait on + * \param flags - Parameters for the operation(See above) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, + * ::cuStreamWaitEvent + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags __dv(0)); + +/** + * Type of stream callback functions. + * \param stream The stream as passed to ::cudaStreamAddCallback, may be NULL. + * \param status ::cudaSuccess or any persistent error on the stream. + * \param userData User parameter provided at registration. + */ +typedef void (CUDART_CB *cudaStreamCallback_t)(cudaStream_t stream, cudaError_t status, void *userData); + +/** + * \brief Add a callback to a compute stream + * + * \note This function is slated for eventual deprecation and removal. If + * you do not require the callback to execute in case of a device error, + * consider using ::cudaLaunchHostFunc. Additionally, this function is not + * supported with ::cudaStreamBeginCapture and ::cudaStreamEndCapture, unlike + * ::cudaLaunchHostFunc. + * + * Adds a callback to be called on the host after all currently enqueued + * items in the stream have completed. For each + * cudaStreamAddCallback call, a callback will be executed exactly once. + * The callback will block later work in the stream until it is finished. + * + * The callback may be passed ::cudaSuccess or an error code. In the event + * of a device error, all subsequently executed callbacks will receive an + * appropriate ::cudaError_t. + * + * Callbacks must not make any CUDA API calls. Attempting to use CUDA APIs + * may result in ::cudaErrorNotPermitted. Callbacks must not perform any + * synchronization that may depend on outstanding device work or other callbacks + * that are not mandated to run earlier. Callbacks without a mandated order + * (in independent streams) execute in undefined order and may be serialized. + * + * For the purposes of Unified Memory, callback execution makes a number of + * guarantees: + *
+ * - The callback stream is considered idle for the duration of the
+ *   callback. Thus, for example, a callback may always use memory attached
+ *   to the callback stream.
+ * - The start of execution of a callback has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the callback. It thus synchronizes streams which have been "joined"
+ *   prior to the callback.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding callbacks have executed. Thus, for
+ *   example, a callback might use global attached memory even if work has
+ *   been added to another stream, if it has been properly ordered with an
+ *   event.
+ * - Completion of a callback does not cause a stream to become
+ *   active except as described above. The callback stream will remain idle
+ *   if no device work follows the callback, and will remain idle across
+ *   consecutive callbacks without device work in between. Thus, for example,
+ *   stream synchronization can be done by signaling from a callback at the
+ *   end of the stream.
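+ *
+ * A minimal sketch (the callback name and the host flag are illustrative, and
+ * \c stream is assumed to be an existing stream):
+ * \code
+ void CUDART_CB myCallback(cudaStream_t stream, cudaError_t status, void *userData)
+ {
+     // Runs on a host thread once all previously enqueued work in the stream has completed.
+     // No CUDA API calls may be made from here.
+     *(volatile int *)userData = (status == cudaSuccess) ? 1 : -1;
+ }
+
+ ...
+ volatile int done = 0;
+ // enqueue work into stream, then register the callback behind it
+ cudaStreamAddCallback(stream, myCallback, (void *)&done, 0);
+ * \endcode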
+ * + * \param stream - Stream to add callback to + * \param callback - The function to call once preceding stream operations are complete + * \param userData - User specified data to be passed to the callback function + * \param flags - Reserved for future use, must be 0 + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamSynchronize, ::cudaStreamWaitEvent, ::cudaStreamDestroy, ::cudaMallocManaged, ::cudaStreamAttachMemAsync, + * ::cudaLaunchHostFunc, ::cuStreamAddCallback + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamAddCallback(cudaStream_t stream, + cudaStreamCallback_t callback, void *userData, unsigned int flags); + +/** + * \brief Waits for stream tasks to complete + * + * Blocks until \p stream has completed all operations. If the + * ::cudaDeviceScheduleBlockingSync flag was set for this device, + * the host thread will block until the stream is finished with + * all of its tasks. + * + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamWaitEvent, ::cudaStreamAddCallback, ::cudaStreamDestroy, + * ::cuStreamSynchronize + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamSynchronize(cudaStream_t stream); + +/** + * \brief Queries an asynchronous stream for completion status + * + * Returns ::cudaSuccess if all operations in \p stream have + * completed, or ::cudaErrorNotReady if not. + * + * For the purposes of Unified Memory, a return value of ::cudaSuccess + * is equivalent to having called ::cudaStreamSynchronize(). + * + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidResourceHandle + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, + * ::cuStreamQuery + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamQuery(cudaStream_t stream); + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p stream to specify stream association of + * \p length bytes of memory starting from \p devPtr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p devPtr must point to an one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cudaMallocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::cudaDevAttrPageableMemoryAccess. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. 
+ * + * For pageable allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle. + * The default value for \p flags is ::cudaMemAttachSingle + * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with + * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess, + * the program makes a guarantee that it will only access the memory on the device + * from \p stream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p stream + * have completed, regardless of whether other streams are active. In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. + * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p stream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cudaMallocManaged. For __managed__ variables, the default + * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. 
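+ *
+ * An illustrative sketch (error checking omitted; most relevant on devices where
+ * ::cudaDevAttrConcurrentManagedAccess is 0):
+ * \code
+ int *data;
+ cudaStream_t stream;
+ cudaStreamCreate(&stream);
+ cudaMallocManaged((void **)&data, 4096, cudaMemAttachGlobal);
+ cudaStreamAttachMemAsync(stream, data, 0, cudaMemAttachSingle);  // associate the whole allocation with stream
+ cudaStreamSynchronize(stream);   // once stream is idle, the CPU may access data even if other streams are busy
+ data[0] = 42;
+ * \endcode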
+ * + * \param stream - Stream in which to enqueue the attach operation + * \param devPtr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * memory) + * \param length - Length of memory (defaults to zero) + * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle) + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged, + * ::cuStreamAttachMemAsync + */ +#if defined(__cplusplus) +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamAttachMemAsync(cudaStream_t stream, void *devPtr, size_t length __dv(0), unsigned int flags = cudaMemAttachSingle); +#else +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamAttachMemAsync(cudaStream_t stream, void *devPtr, size_t length __dv(0), unsigned int flags); +#endif + +/** + * \brief Begins graph capture on a stream + * + * Begin graph capture on \p stream. When a stream is in capture mode, all operations + * pushed into the stream will not be executed, but will instead be captured into + * a graph, which will be returned via ::cudaStreamEndCapture. Capture may not be initiated + * if \p stream is ::cudaStreamLegacy. Capture must be ended on the same stream in which + * it was initiated, and it may only be initiated if the stream is not already in capture + * mode. The capture mode may be queried via ::cudaStreamIsCapturing. A unique id + * representing the capture sequence may be queried via ::cudaStreamGetCaptureInfo. + * + * If \p mode is not ::cudaStreamCaptureModeRelaxed, ::cudaStreamEndCapture must be + * called on this stream from the same thread. + * + * \note Kernels captured using this API must not use texture and surface references. + * Reading or writing through any texture or surface reference is undefined + * behavior. This restriction does not apply to texture and surface objects. + * + * \param stream - Stream in which to initiate capture + * \param mode - Controls the interaction of this capture sequence with other API + * calls that are potentially unsafe. For more details see + * ::cudaThreadExchangeStreamCaptureMode. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * + * \sa + * ::cudaStreamCreate, + * ::cudaStreamIsCapturing, + * ::cudaStreamEndCapture, + * ::cudaThreadExchangeStreamCaptureMode + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamBeginCapture(cudaStream_t stream, enum cudaStreamCaptureMode mode); + +/** + * \brief Swaps the stream capture interaction mode for a thread + * + * Sets the calling thread's stream capture interaction mode to the value contained + * in \p *mode, and overwrites \p *mode with the previous mode for the thread. To + * facilitate deterministic behavior across function or module boundaries, callers + * are encouraged to use this API in a push-pop fashion: \code + cudaStreamCaptureMode mode = desiredMode; + cudaThreadExchangeStreamCaptureMode(&mode); + ... + cudaThreadExchangeStreamCaptureMode(&mode); // restore previous mode + * \endcode + * + * During stream capture (see ::cudaStreamBeginCapture), some actions, such as a call + * to ::cudaMalloc, may be unsafe. 
In the case of ::cudaMalloc, the operation is + * not enqueued asynchronously to a stream, and is not observed by stream capture. + * Therefore, if the sequence of operations captured via ::cudaStreamBeginCapture + * depended on the allocation being replayed whenever the graph is launched, the + * captured graph would be invalid. + * + * Therefore, stream capture places restrictions on API calls that can be made within + * or concurrently to a ::cudaStreamBeginCapture-::cudaStreamEndCapture sequence. This + * behavior can be controlled via this API and flags to ::cudaStreamBeginCapture. + * + * A thread's mode is one of the following: + * - \p cudaStreamCaptureModeGlobal: This is the default mode. If the local thread has + * an ongoing capture sequence that was not initiated with + * \p cudaStreamCaptureModeRelaxed at \p cuStreamBeginCapture, or if any other thread + * has a concurrent capture sequence initiated with \p cudaStreamCaptureModeGlobal, + * this thread is prohibited from potentially unsafe API calls. + * - \p cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture + * sequence not initiated with \p cudaStreamCaptureModeRelaxed, it is prohibited + * from potentially unsafe API calls. Concurrent capture sequences in other threads + * are ignored. + * - \p cudaStreamCaptureModeRelaxed: The local thread is not prohibited from potentially + * unsafe API calls. Note that the thread is still prohibited from API calls which + * necessarily conflict with stream capture, for example, attempting ::cudaEventQuery + * on an event that was last recorded inside a capture sequence. + * + * \param mode - Pointer to mode value to swap with the current mode + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * + * \sa + * ::cudaStreamBeginCapture + */ +extern __host__ cudaError_t CUDARTAPI cudaThreadExchangeStreamCaptureMode(enum cudaStreamCaptureMode *mode); + +/** + * \brief Ends capture on a stream, returning the captured graph + * + * End capture on \p stream, returning the captured graph via \p pGraph. + * Capture must have been initiated on \p stream via a call to ::cudaStreamBeginCapture. + * If capture was invalidated, due to a violation of the rules of stream capture, then + * a NULL graph will be returned. + * + * If the \p mode argument to ::cudaStreamBeginCapture was not + * ::cudaStreamCaptureModeRelaxed, this call must be from the same thread as + * ::cudaStreamBeginCapture. + * + * \param stream - Stream to query + * \param pGraph - The captured graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorStreamCaptureWrongThread + * \notefnerr + * + * \sa + * ::cudaStreamCreate, + * ::cudaStreamBeginCapture, + * ::cudaStreamIsCapturing + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamEndCapture(cudaStream_t stream, cudaGraph_t *pGraph); + +/** + * \brief Returns a stream's capture status + * + * Return the capture status of \p stream via \p pCaptureStatus. After a successful + * call, \p *pCaptureStatus will contain one of the following: + * - ::cudaStreamCaptureStatusNone: The stream is not capturing. + * - ::cudaStreamCaptureStatusActive: The stream is capturing. + * - ::cudaStreamCaptureStatusInvalidated: The stream was capturing but an error + * has invalidated the capture sequence. The capture sequence must be terminated + * with ::cudaStreamEndCapture on the stream where it was initiated in order to + * continue using \p stream. 
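+ *
+ * A brief sketch (error checking omitted; the captured work is elided) of checking
+ * the status around a capture sequence:
+ * \code
+ cudaStream_t stream;
+ cudaGraph_t graph;
+ enum cudaStreamCaptureStatus status;
+ cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
+ cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
+ // ... work enqueued here is recorded into the graph, not executed ...
+ cudaStreamIsCapturing(stream, &status);   // status == cudaStreamCaptureStatusActive
+ cudaStreamEndCapture(stream, &graph);
+ cudaStreamIsCapturing(stream, &status);   // status == cudaStreamCaptureStatusNone
+ cudaGraphDestroy(graph);
+ * \endcode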
+ * + * Note that, if this is called on ::cudaStreamLegacy (the "null stream") while + * a blocking stream on the same device is capturing, it will return + * ::cudaErrorStreamCaptureImplicit and \p *pCaptureStatus is unspecified + * after the call. The blocking stream capture is not invalidated. + * + * When a blocking stream is capturing, the legacy stream is in an + * unusable state until the blocking stream capture is terminated. The legacy + * stream is not supported for stream capture, but attempted use would have an + * implicit dependency on the capturing stream(s). + * + * \param stream - Stream to query + * \param pCaptureStatus - Returns the stream's capture status + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorStreamCaptureImplicit + * \notefnerr + * + * \sa + * ::cudaStreamCreate, + * ::cudaStreamBeginCapture, + * ::cudaStreamEndCapture + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamIsCapturing(cudaStream_t stream, enum cudaStreamCaptureStatus *pCaptureStatus); + +/** + * \brief Query a stream's capture state + * + * Query stream state related to stream capture. + * + * If called on ::cudaStreamLegacy (the "null stream") while a stream not created + * with ::cudaStreamNonBlocking is capturing, returns ::cudaErrorStreamCaptureImplicit. + * + * Valid data (other than capture status) is returned only if both of the following are true: + * - the call returns cudaSuccess + * - the returned capture status is ::cudaStreamCaptureStatusActive + * + * \param stream - The stream to query + * \param captureStatus_out - Location to return the capture status of the stream; required + * \param id_out - Optional location to return an id for the capture sequence, which is + * unique over the lifetime of the process + * \param graph_out - Optional location to return the graph being captured into. All + * operations other than destroy and node removal are permitted on the graph + * while the capture sequence is in progress. This API does not transfer + * ownership of the graph, which is transferred or destroyed at + * ::cudaStreamEndCapture. Note that the graph handle may be invalidated before + * end of capture for certain errors. Nodes that are or become + * unreachable from the original stream at ::cudaStreamEndCapture due to direct + * actions on the graph do not trigger ::cudaErrorStreamCaptureUnjoined. + * \param dependencies_out - Optional location to store a pointer to an array of nodes. + * The next node to be captured in the stream will depend on this set of nodes, + * absent operations such as event wait which modify this set. The array pointer + * is valid until the next API call which operates on the stream or until end of + * capture. The node handles may be copied out and are valid until they or the + * graph is destroyed. The driver-owned array may also be passed directly to + * APIs that operate on the graph (not the stream) without copying. + * \param numDependencies_out - Optional location to store the size of the array + * returned in dependencies_out. 
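+ *
+ * For example (sketch; \c stream is assumed to be capturing, and only the status
+ * and id are requested, so the optional outputs are passed as NULL):
+ * \code
+ enum cudaStreamCaptureStatus status;
+ unsigned long long id = 0;
+ cudaStreamGetCaptureInfo(stream, &status, &id, NULL, NULL, NULL);
+ if (status == cudaStreamCaptureStatusActive) {
+     // id now identifies the ongoing capture sequence on stream
+ }
+ * \endcode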
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorStreamCaptureImplicit + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cudaStreamBeginCapture, + * ::cudaStreamIsCapturing, + * ::cudaStreamUpdateCaptureDependencies + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out __dv(0), cudaGraph_t *graph_out __dv(0), const cudaGraphNode_t **dependencies_out __dv(0), size_t *numDependencies_out __dv(0)); + +/** + * \brief Update the set of dependencies in a capturing stream (11.3+) + * + * Modifies the dependency set of a capturing stream. The dependency set is the set + * of nodes that the next captured node in the stream will depend on. + * + * Valid flags are ::cudaStreamAddCaptureDependencies and + * ::cudaStreamSetCaptureDependencies. These control whether the set passed to + * the API is added to the existing set or replaces it. A flags value of 0 defaults + * to ::cudaStreamAddCaptureDependencies. + * + * Nodes that are removed from the dependency set via this API do not result in + * ::cudaErrorStreamCaptureUnjoined if they are unreachable from the stream at + * ::cudaStreamEndCapture. + * + * Returns ::cudaErrorIllegalState if the stream is not capturing. + * + * This API is new in CUDA 11.3. Developers requiring compatibility across minor + * versions of the CUDA driver to 11.0 should not use this API or provide a fallback. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorIllegalState + * \notefnerr + * + * \sa + * ::cudaStreamBeginCapture, + * ::cudaStreamGetCaptureInfo, + */ +extern __host__ cudaError_t CUDARTAPI cudaStreamUpdateCaptureDependencies(cudaStream_t stream, cudaGraphNode_t *dependencies, size_t numDependencies, unsigned int flags __dv(0)); +/** @} */ /* END CUDART_STREAM */ + +/** + * \defgroup CUDART_EVENT Event Management + * + * ___MANBRIEF___ event management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the event management functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Creates an event object + * + * Creates an event object for the current device using ::cudaEventDefault. + * + * \param event - Newly created event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*, unsigned int) "cudaEventCreate (C++ API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cuEventCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaEventCreate(cudaEvent_t *event); + +/** + * \brief Creates an event object with the specified flags + * + * Creates an event object for the current device with the specified flags. Valid + * flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that event should use blocking + * synchronization. A host thread that uses ::cudaEventSynchronize() to wait + * on an event created with this flag will block until the event actually + * completes. + * - ::cudaEventDisableTiming: Specifies that the created event does not need + * to record timing data. 
Events created with this flag specified and + * the ::cudaEventBlockingSync flag not specified will provide the best + * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery(). + * - ::cudaEventInterprocess: Specifies that the created event may be used as an + * interprocess event by ::cudaIpcGetEventHandle(). ::cudaEventInterprocess must + * be specified along with ::cudaEventDisableTiming. + * + * \param event - Newly created event + * \param flags - Flags for new event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cuEventCreate + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags); + +/** + * \brief Records an event + * + * Captures in \p event the contents of \p stream at the time of this call. + * \p event and \p stream must be on the same CUDA context. + * Calls such as ::cudaEventQuery() or ::cudaStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p stream after this call do not modify \p event. See note on default + * stream behavior for what is captured in the default case. + * + * ::cudaEventRecord() can be called multiple times on the same event and + * will overwrite the previously captured state. Other APIs such as + * ::cudaStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cudaEventRecord(). Before the first call to ::cudaEventRecord(), an + * event represents an empty set of work, so for example ::cudaEventQuery() + * would return ::cudaSuccess. + * + * \param event - Event to record + * \param stream - Stream in which to record event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cudaEventRecordWithFlags, + * ::cuEventRecord + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream __dv(0)); + +/** + * \brief Records an event + * + * Captures in \p event the contents of \p stream at the time of this call. + * \p event and \p stream must be on the same CUDA context. + * Calls such as ::cudaEventQuery() or ::cudaStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p stream after this call do not modify \p event. See note on default + * stream behavior for what is captured in the default case. + * + * ::cudaEventRecordWithFlags() can be called multiple times on the same event and + * will overwrite the previously captured state. Other APIs such as + * ::cudaStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cudaEventRecordWithFlags(). 
Before the first call to ::cudaEventRecordWithFlags(), an + * event represents an empty set of work, so for example ::cudaEventQuery() + * would return ::cudaSuccess. + * + * flags include: + * - ::cudaEventRecordDefault: Default event creation flag. + * - ::cudaEventRecordExternal: Event is captured in the graph as an external + * event node when performing stream capture. + * + * \param event - Event to record + * \param stream - Stream in which to record event + * \param flags - Parameters for the operation(See above) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent, + * ::cudaEventRecord, + * ::cuEventRecord, + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream __dv(0), unsigned int flags __dv(0)); +#endif + +/** + * \brief Queries an event's status + * + * Queries the status of all work currently captured by \p event. See + * ::cudaEventRecord() for details on what is captured by an event. + * + * Returns ::cudaSuccess if all captured work has been completed, or + * ::cudaErrorNotReady if any captured work is incomplete. + * + * For the purposes of Unified Memory, a return value of ::cudaSuccess + * is equivalent to having called ::cudaEventSynchronize(). + * + * \param event - Event to query + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cuEventQuery + */ +extern __host__ cudaError_t CUDARTAPI cudaEventQuery(cudaEvent_t event); + +/** + * \brief Waits for an event to complete + * + * Waits until the completion of all work currently captured in \p event. + * See ::cudaEventRecord() for details on what is captured by an event. + * + * Waiting for an event that was created with the ::cudaEventBlockingSync + * flag will cause the calling CPU thread to block until the event has + * been completed by the device. If the ::cudaEventBlockingSync flag has + * not been set, then the CPU thread will busy-wait until the event has + * been completed by the device. + * + * \param event - Event to wait for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, + * ::cudaEventQuery, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cuEventSynchronize + */ +extern __host__ cudaError_t CUDARTAPI cudaEventSynchronize(cudaEvent_t event); + +/** + * \brief Destroys an event object + * + * Destroys the event specified by \p event. + * + * An event may be destroyed before it is complete (i.e., while + * ::cudaEventQuery() would return ::cudaErrorNotReady). 
In this case, the + * call does not block on completion of the event, and any associated + * resources will automatically be released asynchronously at completion. + * + * \param event - Event to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventRecord, ::cudaEventElapsedTime, + * ::cuEventDestroy + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event); + +/** + * \brief Computes the elapsed time between events + * + * Computes the elapsed time between two events (in milliseconds with a + * resolution of around 0.5 microseconds). + * + * If either event was last recorded in a non-NULL stream, the resulting time + * may be greater than expected (even if both used the same stream handle). This + * happens because the ::cudaEventRecord() operation takes place asynchronously + * and there is no guarantee that the measured latency is actually just between + * the two events. Any number of other different stream operations could execute + * in between the two measured events, thus altering the timing in a significant + * way. + * + * If ::cudaEventRecord() has not been called on either event, then + * ::cudaErrorInvalidResourceHandle is returned. If ::cudaEventRecord() has been + * called on both events but one or both of them has not yet been completed + * (that is, ::cudaEventQuery() would return ::cudaErrorNotReady on at least one + * of the events), ::cudaErrorNotReady is returned. If either event was created + * with the ::cudaEventDisableTiming flag, then this function will return + * ::cudaErrorInvalidResourceHandle. + * + * \param ms - Time between \p start and \p end in ms + * \param start - Starting event + * \param end - Ending event + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorLaunchFailure, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventRecord, + * ::cuEventElapsedTime + */ +extern __host__ cudaError_t CUDARTAPI cudaEventElapsedTime(float *ms, cudaEvent_t start, cudaEvent_t end); + +/** @} */ /* END CUDART_EVENT */ + +/** + * \defgroup CUDART_EXTRES_INTEROP External Resource Interoperability + * + * ___MANBRIEF___ External resource interoperability functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the external resource interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Imports an external memory object + * + * Imports an externally allocated memory object and returns + * a handle to that in \p extMem_out. + * + * The properties of the handle being imported must be described in + * \p memHandleDesc. 
The ::cudaExternalMemoryHandleDesc structure + * is defined as follows: + * + * \code + typedef struct cudaExternalMemoryHandleDesc_st { + cudaExternalMemoryHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void *nvSciBufObject; + } handle; + unsigned long long size; + unsigned int flags; + } cudaExternalMemoryHandleDesc; + * \endcode + * + * where ::cudaExternalMemoryHandleDesc::type specifies the type + * of handle being imported. ::cudaExternalMemoryHandleType is + * defined as: + * + * \code + typedef enum cudaExternalMemoryHandleType_enum { + cudaExternalMemoryHandleTypeOpaqueFd = 1, + cudaExternalMemoryHandleTypeOpaqueWin32 = 2, + cudaExternalMemoryHandleTypeOpaqueWin32Kmt = 3, + cudaExternalMemoryHandleTypeD3D12Heap = 4, + cudaExternalMemoryHandleTypeD3D12Resource = 5, + cudaExternalMemoryHandleTypeD3D11Resource = 6, + cudaExternalMemoryHandleTypeD3D11ResourceKmt = 7, + cudaExternalMemoryHandleTypeNvSciBuf = 8 + } cudaExternalMemoryHandleType; + * \endcode + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeOpaqueFd, then + * ::cudaExternalMemoryHandleDesc::handle::fd must be a valid + * file descriptor referencing a memory object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeOpaqueWin32, then exactly one + * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and + * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a memory object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::cudaExternalMemoryHandleDesc::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a memory object. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt, then + * ::cudaExternalMemoryHandleDesc::handle::win32::handle must + * be non-NULL and + * ::cudaExternalMemoryHandleDesc::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * memory object are destroyed. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeD3D12Heap, then exactly one + * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and + * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Heap object. This handle holds a reference to the underlying + * object. If ::cudaExternalMemoryHandleDesc::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Heap object. 
+ * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeD3D12Resource, then exactly one + * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and + * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Resource object. This handle holds a reference to the + * underlying object. If + * ::cudaExternalMemoryHandleDesc::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Resource object. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeD3D11Resource,then exactly one + * of ::cudaExternalMemoryHandleDesc::handle::win32::handle and + * ::cudaExternalMemoryHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle is + * not NULL, then it must represent a valid shared NT handle that is + * returned by IDXGIResource1::CreateSharedHandle when referring to a + * ID3D11Resource object. If + * ::cudaExternalMemoryHandleDesc::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D11Resource object. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeD3D11ResourceKmt, then + * ::cudaExternalMemoryHandleDesc::handle::win32::handle must + * be non-NULL and ::cudaExternalMemoryHandleDesc::handle::win32::name + * must be NULL. The handle specified must be a valid shared KMT + * handle that is returned by IDXGIResource::GetSharedHandle when + * referring to a ID3D11Resource object. + * + * If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeNvSciBuf, then + * ::cudaExternalMemoryHandleDesc::handle::nvSciBufObject must be NON-NULL + * and reference a valid NvSciBuf object. + * If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the + * application must use ::cudaWaitExternalSemaphoresAsync or ::cudaSignalExternalSemaphoresAsync + * as approprriate barriers to maintain coherence between CUDA and the other drivers. + * See ::cudaExternalSemaphoreWaitSkipNvSciBufMemSync and ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync + * for memory synchronization. + * + * The size of the memory object must be specified in + * ::cudaExternalMemoryHandleDesc::size. + * + * Specifying the flag ::cudaExternalMemoryDedicated in + * ::cudaExternalMemoryHandleDesc::flags indicates that the + * resource is a dedicated resource. The definition of what a + * dedicated resource is outside the scope of this extension. 
+ * This flag must be set if ::cudaExternalMemoryHandleDesc::type + * is one of the following: + * ::cudaExternalMemoryHandleTypeD3D12Resource + * ::cudaExternalMemoryHandleTypeD3D11Resource + * ::cudaExternalMemoryHandleTypeD3D11ResourceKmt + * + * \param extMem_out - Returned handle to an external memory object + * \param memHandleDesc - Memory import handle descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorOperatingSystem + * \notefnerr + * \note_init_rt + * \note_callback + * + * \note If the Vulkan memory imported into CUDA is mapped on the CPU then the + * application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges + * as well as appropriate Vulkan pipeline barriers to maintain coherence between + * CPU and GPU. For more information on these APIs, please refer to "Synchronization + * and Cache Control" chapter from Vulkan specification. + * + * + * \sa ::cudaDestroyExternalMemory, + * ::cudaExternalMemoryGetMappedBuffer, + * ::cudaExternalMemoryGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaImportExternalMemory(cudaExternalMemory_t *extMem_out, const struct cudaExternalMemoryHandleDesc *memHandleDesc); + +/** + * \brief Maps a buffer onto an imported memory object + * + * Maps a buffer onto an imported memory object and returns a device + * pointer in \p devPtr. + * + * The properties of the buffer being mapped must be described in + * \p bufferDesc. The ::cudaExternalMemoryBufferDesc structure is + * defined as follows: + * + * \code + typedef struct cudaExternalMemoryBufferDesc_st { + unsigned long long offset; + unsigned long long size; + unsigned int flags; + } cudaExternalMemoryBufferDesc; + * \endcode + * + * where ::cudaExternalMemoryBufferDesc::offset is the offset in + * the memory object where the buffer's base address is. + * ::cudaExternalMemoryBufferDesc::size is the size of the buffer. + * ::cudaExternalMemoryBufferDesc::flags must be zero. + * + * The offset and size have to be suitably aligned to match the + * requirements of the external API. Mapping two buffers whose ranges + * overlap may or may not result in the same virtual address being + * returned for the overlapped portion. In such cases, the application + * must ensure that all accesses to that region from the GPU are + * volatile. Otherwise writes made via one address are not guaranteed + * to be visible via the other address, even if they're issued by the + * same thread. It is recommended that applications map the combined + * range instead of mapping separate buffers and then apply the + * appropriate offsets to the returned pointer to derive the + * individual buffers. + * + * The returned pointer \p devPtr must be freed using ::cudaFree. 
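+ *
+ * For illustration only, a minimal sketch that imports a memory object from a
+ * POSIX file descriptor and maps a buffer onto it (the fd and size values are
+ * placeholders and error checking is omitted):
+ * \code
+   int fd = -1;                    // placeholder: fd exported by the producing API
+   unsigned long long objSize = 0; // placeholder: size of the memory object in bytes
+
+   cudaExternalMemoryHandleDesc memDesc = {};
+   memDesc.type      = cudaExternalMemoryHandleTypeOpaqueFd;
+   memDesc.handle.fd = fd;         // ownership of fd passes to the CUDA driver
+   memDesc.size      = objSize;
+
+   cudaExternalMemory_t extMem;
+   cudaImportExternalMemory(&extMem, &memDesc);
+
+   cudaExternalMemoryBufferDesc bufDesc = {};
+   bufDesc.offset = 0;
+   bufDesc.size   = objSize;       // map the whole object as one buffer
+   bufDesc.flags  = 0;
+
+   void *devPtr = NULL;
+   cudaExternalMemoryGetMappedBuffer(&devPtr, extMem, &bufDesc);
+
+   // ... use devPtr in kernels or cudaMemcpy ...
+
+   cudaFree(devPtr);                   // release the mapping
+   cudaDestroyExternalMemory(extMem);  // then release the imported object
+ * \endcode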
+ * + * \param devPtr - Returned device pointer to buffer + * \param extMem - Handle to external memory object + * \param bufferDesc - Buffer descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalMemory, + * ::cudaDestroyExternalMemory, + * ::cudaExternalMemoryGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaExternalMemoryGetMappedBuffer(void **devPtr, cudaExternalMemory_t extMem, const struct cudaExternalMemoryBufferDesc *bufferDesc); + +/** + * \brief Maps a CUDA mipmapped array onto an external memory object + * + * Maps a CUDA mipmapped array onto an external object and returns a + * handle to it in \p mipmap. + * + * The properties of the CUDA mipmapped array being mapped must be + * described in \p mipmapDesc. The structure + * ::cudaExternalMemoryMipmappedArrayDesc is defined as follows: + * + * \code + typedef struct cudaExternalMemoryMipmappedArrayDesc_st { + unsigned long long offset; + cudaChannelFormatDesc formatDesc; + cudaExtent extent; + unsigned int flags; + unsigned int numLevels; + } cudaExternalMemoryMipmappedArrayDesc; + * \endcode + * + * where ::cudaExternalMemoryMipmappedArrayDesc::offset is the + * offset in the memory object where the base level of the mipmap + * chain is. + * ::cudaExternalMemoryMipmappedArrayDesc::formatDesc describes the + * format of the data. + * ::cudaExternalMemoryMipmappedArrayDesc::extent specifies the + * dimensions of the base level of the mipmap chain. + * ::cudaExternalMemoryMipmappedArrayDesc::flags are flags associated + * with CUDA mipmapped arrays. For further details, please refer to + * the documentation for ::cudaMalloc3DArray. Note that if the mipmapped + * array is bound as a color target in the graphics API, then the flag + * ::cudaArrayColorAttachment must be specified in + * ::cudaExternalMemoryMipmappedArrayDesc::flags. + * ::cudaExternalMemoryMipmappedArrayDesc::numLevels specifies + * the total number of levels in the mipmap chain. + * + * The returned CUDA mipmapped array must be freed using ::cudaFreeMipmappedArray. + * + * \param mipmap - Returned CUDA mipmapped array + * \param extMem - Handle to external memory object + * \param mipmapDesc - CUDA array descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalMemory, + * ::cudaDestroyExternalMemory, + * ::cudaExternalMemoryGetMappedBuffer + * + * \note If ::cudaExternalMemoryHandleDesc::type is + * ::cudaExternalMemoryHandleTypeNvSciBuf, then + * ::cudaExternalMemoryMipmappedArrayDesc::numLevels must not be greater than 1. + */ +extern __host__ cudaError_t CUDARTAPI cudaExternalMemoryGetMappedMipmappedArray(cudaMipmappedArray_t *mipmap, cudaExternalMemory_t extMem, const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc); + +/** + * \brief Destroys an external memory object. + * + * Destroys the specified external memory object. Any existing buffers + * and CUDA mipmapped arrays mapped onto this object must no longer be + * used and must be explicitly freed using ::cudaFree and + * ::cudaFreeMipmappedArray respectively. 
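+ *
+ * For illustration only, the expected teardown order (devPtr, mipmap and extMem
+ * are placeholder handles obtained from the mapping calls above):
+ * \code
+   cudaFree(devPtr);                   // buffers mapped with cudaExternalMemoryGetMappedBuffer
+   cudaFreeMipmappedArray(mipmap);     // arrays mapped with cudaExternalMemoryGetMappedMipmappedArray
+   cudaDestroyExternalMemory(extMem);  // finally release the imported memory object
+ * \endcode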
+ * + * \param extMem - External memory object to be destroyed + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa ::cudaImportExternalMemory, + * ::cudaExternalMemoryGetMappedBuffer, + * ::cudaExternalMemoryGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroyExternalMemory(cudaExternalMemory_t extMem); + +/** + * \brief Imports an external semaphore + * + * Imports an externally allocated synchronization object and returns + * a handle to that in \p extSem_out. + * + * The properties of the handle being imported must be described in + * \p semHandleDesc. The ::cudaExternalSemaphoreHandleDesc is defined + * as follows: + * + * \code + typedef struct cudaExternalSemaphoreHandleDesc_st { + cudaExternalSemaphoreHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void* NvSciSyncObj; + } handle; + unsigned int flags; + } cudaExternalSemaphoreHandleDesc; + * \endcode + * + * where ::cudaExternalSemaphoreHandleDesc::type specifies the type of + * handle being imported. ::cudaExternalSemaphoreHandleType is defined + * as: + * + * \code + typedef enum cudaExternalSemaphoreHandleType_enum { + cudaExternalSemaphoreHandleTypeOpaqueFd = 1, + cudaExternalSemaphoreHandleTypeOpaqueWin32 = 2, + cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = 3, + cudaExternalSemaphoreHandleTypeD3D12Fence = 4, + cudaExternalSemaphoreHandleTypeD3D11Fence = 5, + cudaExternalSemaphoreHandleTypeNvSciSync = 6, + cudaExternalSemaphoreHandleTypeKeyedMutex = 7, + cudaExternalSemaphoreHandleTypeKeyedMutexKmt = 8, + cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = 9, + cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = 10 + } cudaExternalSemaphoreHandleType; + * \endcode + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeOpaqueFd, then + * ::cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file + * descriptor referencing a synchronization object. Ownership of the + * file descriptor is transferred to the CUDA driver when the handle + * is imported successfully. Performing any operations on the file + * descriptor after it is imported results in undefined behavior. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32, then exactly one of + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::cudaExternalSemaphoreHandleDesc::handle::win32::name is + * not NULL, then it must name a valid synchronization object. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle must be + * non-NULL and ::cudaExternalSemaphoreHandleDesc::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * synchronization object are destroyed. 
+ * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeD3D12Fence, then exactly one of + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Fence object. This handle holds a reference to the underlying + * object. If ::cudaExternalSemaphoreHandleDesc::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid ID3D12Fence object. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeD3D11Fence, then exactly one of + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D11Fence::CreateSharedHandle. If + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid ID3D11Fence object. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeNvSciSync, then + * ::cudaExternalSemaphoreHandleDesc::handle::nvSciSyncObj + * represents a valid NvSciSyncObj. + * + * ::cudaExternalSemaphoreHandleTypeKeyedMutex, then exactly one of + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle + * is not NULL, then it represent a valid shared NT handle that + * is returned by IDXGIResource1::CreateSharedHandle when referring to + * a IDXGIKeyedMutex object. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle must be + * non-NULL and ::cudaExternalSemaphoreHandleDesc::handle::win32::name + * must be NULL. The handle specified must represent a valid KMT + * handle that is returned by IDXGIResource::GetSharedHandle when + * referring to a IDXGIKeyedMutex object. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, then + * ::cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file + * descriptor referencing a synchronization object. Ownership of the + * file descriptor is transferred to the CUDA driver when the handle + * is imported successfully. Performing any operations on the file + * descriptor after it is imported results in undefined behavior. + * + * If ::cudaExternalSemaphoreHandleDesc::type is + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then exactly one of + * ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and + * ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be + * NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. 
If ::cudaExternalSemaphoreHandleDesc::handle::win32::name is + * not NULL, then it must name a valid synchronization object. + * + * \param extSem_out - Returned handle to an external semaphore + * \param semHandleDesc - Semaphore import handle descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorOperatingSystem + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDestroyExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaImportExternalSemaphore(cudaExternalSemaphore_t *extSem_out, const struct cudaExternalSemaphoreHandleDesc *semHandleDesc); + +/** + * \brief Signals a set of external semaphore objects + * + * Enqueues a signal operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of signaling a semaphore depends on the type of + * the object. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeOpaqueFd, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + * then signaling the semaphore will set it to the signaled state. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeD3D12Fence, + * ::cudaExternalSemaphoreHandleTypeD3D11Fence, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + * then the semaphore will be set to the value specified in + * ::cudaExternalSemaphoreSignalParams::params::fence::value. + * + * If the semaphore object is of the type ::cudaExternalSemaphoreHandleTypeNvSciSync + * this API sets ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence to a + * value that can be used by subsequent waiters of the same NvSciSync object to + * order operations with those currently submitted in \p stream. Such an update + * will overwrite previous contents of + * ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence. By default, + * signaling such an external semaphore object causes appropriate memory synchronization + * operations to be performed over all the external memory objects that are imported as + * ::cudaExternalMemoryHandleTypeNvSciBuf. This ensures that any subsequent accesses + * made by other importers of the same set of NvSciBuf memory object(s) are coherent. + * These operations can be skipped by specifying the flag + * ::cudaExternalSemaphoreSignalSkipNvSciBufMemSync, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. + * Also, for semaphore object of the type ::cudaExternalSemaphoreHandleTypeNvSciSync, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrSignal, this API will return + * cudaErrorNotSupported. + * + * ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence associated with + * semaphore object of the type ::cudaExternalSemaphoreHandleTypeNvSciSync can be + * deterministic. For this the NvSciSyncAttrList used to create the semaphore object + * must have value of NvSciSyncAttrKey_RequireDeterministicFences key set to true. 
+ * Deterministic fences allow users to enqueue a wait over the semaphore object even + * before corresponding signal is enqueued. For such a semaphore object, CUDA guarantees + * that each signal operation will increment the fence value by '1'. Users are expected + * to track count of signals enqueued on the semaphore object and insert waits accordingly. + * When such a semaphore object is signaled from multiple streams, due to concurrent + * stream execution, it is possible that the order in which the semaphore gets signaled + * is indeterministic. This could lead to waiters of the semaphore getting unblocked + * incorrectly. Users are expected to handle such situations, either by not using the + * same semaphore object with deterministic fence support enabled in different streams + * or by adding explicit dependency amongst such streams so that the semaphore is + * signaled in order. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeKeyedMutex, + * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt, + * then the keyed mutex will be released with the key specified in + * ::cudaExternalSemaphoreSignalParams::params::keyedmutex::key. + * + * \param extSemArray - Set of external semaphores to be signaled + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to signal + * \param stream - Stream to enqueue the signal operations in + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalSemaphore, + * ::cudaDestroyExternalSemaphore, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + +/** + * \brief Waits on a set of external semaphore objects + * + * Enqueues a wait operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of waiting on a semaphore depends on the type + * of the object. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeOpaqueFd, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32, + * ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + * then waiting on the semaphore will wait until the semaphore reaches + * the signaled state. The semaphore will then be reset to the + * unsignaled state. Therefore for every signal operation, there can + * only be one wait operation. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeD3D12Fence, + * ::cudaExternalSemaphoreHandleTypeD3D11Fence, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, + * ::cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + * then waiting on the semaphore will wait until the value of the + * semaphore is greater than or equal to + * ::cudaExternalSemaphoreWaitParams::params::fence::value. + * + * If the semaphore object is of the type ::cudaExternalSemaphoreHandleTypeNvSciSync + * then, waiting on the semaphore will wait until the + * ::cudaExternalSemaphoreSignalParams::params::nvSciSync::fence is signaled by the + * signaler of the NvSciSyncObj that was associated with this semaphore object. 
+ * By default, waiting on such an external semaphore object causes appropriate + * memory synchronization operations to be performed over all external memory objects + * that are imported as ::cudaExternalMemoryHandleTypeNvSciBuf. This ensures that + * any subsequent accesses made by other importers of the same set of NvSciBuf memory + * object(s) are coherent. These operations can be skipped by specifying the flag + * ::cudaExternalSemaphoreWaitSkipNvSciBufMemSync, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. + * Also, for semaphore object of the type ::cudaExternalSemaphoreHandleTypeNvSciSync, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrWait, this API will return + * cudaErrorNotSupported. + * + * If the semaphore object is any one of the following types: + * ::cudaExternalSemaphoreHandleTypeKeyedMutex, + * ::cudaExternalSemaphoreHandleTypeKeyedMutexKmt, + * then the keyed mutex will be acquired when it is released with the key specified + * in ::cudaExternalSemaphoreSignalParams::params::keyedmutex::key or + * until the timeout specified by + * ::cudaExternalSemaphoreSignalParams::params::keyedmutex::timeoutMs + * has lapsed. The timeout interval can either be a finite value + * specified in milliseconds or an infinite value. In case an infinite + * value is specified the timeout never elapses. The windows INFINITE + * macro must be used to specify infinite timeout + * + * \param extSemArray - External semaphores to be waited on + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to wait on + * \param stream - Stream to enqueue the wait operations in + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * ::cudaErrorTimeout + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaImportExternalSemaphore, + * ::cudaDestroyExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + +/** + * \brief Destroys an external semaphore + * + * Destroys an external semaphore object and releases any references + * to the underlying resource. Any outstanding signals or waits must + * have completed before the semaphore is destroyed. + * + * \param extSem - External semaphore to be destroyed + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroyExternalSemaphore(cudaExternalSemaphore_t extSem); + +/** @} */ /* END CUDART_EXTRES_INTEROP */ + +/** + * \defgroup CUDART_EXECUTION Execution Control + * + * ___MANBRIEF___ execution control functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the execution control functions of the CUDA runtime + * application programming interface. + * + * Some functions have overloaded C++ API template versions documented separately in the + * \ref CUDART_HIGHLEVEL "C++ API Routines" module. 
+ *
+ * @{
+ */
+
+/**
+ * \brief Launches a device function
+ *
+ * The function invokes kernel \p func on a \p gridDim (\p gridDim.x × \p gridDim.y
+ * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x ×
+ * \p blockDim.y × \p blockDim.z) threads.
+ *
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
+ * Each pointer, from args[0] to args[N - 1], points to the region
+ * of memory from which the actual parameter will be copied.
+ *
+ * For templated functions, pass the function symbol as follows:
+ * func_name<template_arguments>
+ *
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
+ * each thread block.
+ *
+ * \p stream specifies the stream with which the invocation is associated.
+ *
+ * \param func - Device function symbol
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
+ * \param args - Arguments
+ * \param sharedMem - Shared memory
+ * \param stream - Stream identifier
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidConfiguration,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorLaunchOutOfResources,
+ * ::cudaErrorSharedObjectInitFailed,
+ * ::cudaErrorInvalidPtx,
+ * ::cudaErrorUnsupportedPtxVersion,
+ * ::cudaErrorNoKernelImageForDevice,
+ * ::cudaErrorJitCompilerNotFound,
+ * ::cudaErrorJitCompilationDisabled
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
+ * ::cuLaunchKernel
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream);
+
+/**
+ * \brief Launches a CUDA function with launch-time configuration
+ *
+ * Note that the functionally equivalent variadic template ::cudaLaunchKernelEx
+ * is available for C++11 and newer.
+ *
+ * Invokes the kernel \p func on a \p config->gridDim (\p config->gridDim.x
+ * × \p config->gridDim.y × \p config->gridDim.z) grid of blocks.
+ * Each block contains \p config->blockDim (\p config->blockDim.x ×
+ * \p config->blockDim.y × \p config->blockDim.z) threads.
+ *
+ * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that
+ * will be available to each thread block.
+ *
+ * \p config->stream specifies the stream with which the invocation is associated.
+ *
+ * Configuration beyond grid and block dimensions, dynamic shared memory size,
+ * and stream can be provided with the following two fields of \p config:
+ *
+ * \p config->attrs is an array of \p config->numAttrs contiguous
+ * ::cudaLaunchAttribute elements. The value of this pointer is not considered
+ * if \p config->numAttrs is zero. However, in that case, it is recommended to
+ * set the pointer to NULL.
+ * \p config->numAttrs is the number of attributes populating the first
+ * \p config->numAttrs positions of the \p config->attrs array.
+ *
+ * If the kernel has N parameters, \p args should point to an array of N
+ * pointers. Each pointer, from args[0] to args[N - 1], points
+ * to the region of memory from which the actual parameter will be copied.
+ *
+ * N.B. This function is so named to avoid unintentionally invoking the
+ * templated version, \p cudaLaunchKernelEx, for kernels taking a single
+ * void** or void* parameter.
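+ *
+ * For illustration only, a minimal sketch (the kernel myKernel, its argument
+ * and the launch dimensions are placeholders):
+ * \code
+   __global__ void myKernel(int value);
+
+   void launchIt(cudaStream_t stream)
+   {
+       int value = 42;
+       void *args[] = { &value };
+
+       cudaLaunchConfig_t config = {};
+       config.gridDim          = dim3(8, 1, 1);
+       config.blockDim         = dim3(256, 1, 1);
+       config.dynamicSmemBytes = 0;
+       config.stream           = stream;
+       config.attrs            = NULL;  // no additional launch attributes
+       config.numAttrs         = 0;
+
+       cudaLaunchKernelExC(&config, (const void *)myKernel, args);
+   }
+ * \endcode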
+ *
+ * \param config - Launch configuration
+ * \param func - Kernel to launch
+ * \param args - Array of pointers to kernel parameters
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidConfiguration,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorLaunchOutOfResources,
+ * ::cudaErrorSharedObjectInitFailed,
+ * ::cudaErrorInvalidPtx,
+ * ::cudaErrorUnsupportedPtxVersion,
+ * ::cudaErrorNoKernelImageForDevice,
+ * ::cudaErrorJitCompilerNotFound,
+ * ::cudaErrorJitCompilationDisabled
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * \ref ::cudaLaunchKernelEx(const cudaLaunchConfig_t *config, void (*kernel)(ExpTypes...), ActTypes &&... args) "cudaLaunchKernelEx (C++ API)",
+ * ::cuLaunchKernelEx
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args);
+
+/**
+ * \brief Launches a device function where thread blocks can cooperate and synchronize as they execute
+ *
+ * The function invokes kernel \p func on a \p gridDim (\p gridDim.x × \p gridDim.y
+ * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x ×
+ * \p blockDim.y × \p blockDim.z) threads.
+ *
+ * The device on which this kernel is invoked must have a non-zero value for
+ * the device attribute ::cudaDevAttrCooperativeLaunch.
+ *
+ * The total number of blocks launched cannot exceed the maximum number of blocks per
+ * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
+ * as specified by the device attribute ::cudaDevAttrMultiProcessorCount.
+ *
+ * The kernel cannot make use of CUDA dynamic parallelism.
+ *
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
+ * Each pointer, from args[0] to args[N - 1], points to the region
+ * of memory from which the actual parameter will be copied.
+ *
+ * For templated functions, pass the function symbol as follows:
+ * func_name<template_arguments>
+ *
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
+ * each thread block.
+ *
+ * \p stream specifies the stream with which the invocation is associated.
+ *
+ * \param func - Device function symbol
+ * \param gridDim - Grid dimensions
+ * \param blockDim - Block dimensions
+ * \param args - Arguments
+ * \param sharedMem - Shared memory
+ * \param stream - Stream identifier
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidConfiguration,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorLaunchOutOfResources,
+ * ::cudaErrorCooperativeLaunchTooLarge,
+ * ::cudaErrorSharedObjectInitFailed
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * \ref ::cudaLaunchCooperativeKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C++ API)",
+ * ::cudaLaunchCooperativeKernelMultiDevice,
+ * ::cuLaunchCooperativeKernel
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream);
+
+/**
+ * \brief Launches device functions on multiple devices where thread blocks can cooperate and synchronize as they execute
+ *
+ * \deprecated This function is deprecated as of CUDA 11.3.
+ *
+ * Invokes kernels as specified in the \p launchParamsList array where each element
+ * of the array specifies all the parameters required to perform a single kernel launch.
+ * These kernels can cooperate and synchronize as they execute. The size of the array is
+ * specified by \p numDevices.
+ *
+ * No two kernels can be launched on the same device. All the devices targeted by this
+ * multi-device launch must be identical. All devices must have a non-zero value for the
+ * device attribute ::cudaDevAttrCooperativeMultiDeviceLaunch.
+ *
+ * The same kernel must be launched on all devices. Note that any __device__ or __constant__
+ * variables are independently instantiated on every device. It is the application's
+ * responsibility to ensure these variables are initialized and used appropriately.
+ *
+ * The size of the grids as specified in blocks, the size of the blocks themselves and the
+ * amount of shared memory used by each thread block must also match across all launched kernels.
+ *
+ * The streams used to launch these kernels must have been created via either ::cudaStreamCreate
+ * or ::cudaStreamCreateWithPriority. The NULL stream or
+ * ::cudaStreamLegacy or ::cudaStreamPerThread cannot be used.
+ *
+ * The total number of blocks launched per kernel cannot exceed the maximum number of blocks
+ * per multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
+ * as specified by the device attribute ::cudaDevAttrMultiProcessorCount. Since the
+ * total number of blocks launched per device has to match across all devices, the maximum
+ * number of blocks that can be launched per device will be limited by the device with the
+ * least number of multiprocessors.
+ *
+ * The kernel cannot make use of CUDA dynamic parallelism.
+ *
+ * The ::cudaLaunchParams structure is defined as:
+ * \code
+ struct cudaLaunchParams
+ {
+     void *func;
+     dim3 gridDim;
+     dim3 blockDim;
+     void **args;
+     size_t sharedMem;
+     cudaStream_t stream;
+ };
+ * \endcode
+ * where:
+ * - ::cudaLaunchParams::func specifies the kernel to be launched. The same function must
+ *   be launched on all devices. For templated functions, pass the function symbol as follows:
+ *   func_name<template_arguments>
+ * - ::cudaLaunchParams::gridDim specifies the width, height and depth of the grid in blocks.
+ *   This must match across all kernels launched.
+ * - ::cudaLaunchParams::blockDim is the width, height and depth of each thread block. This
+ *   must match across all kernels launched.
+ * - ::cudaLaunchParams::args specifies the arguments to the kernel. If the kernel has
+ *   N parameters then ::cudaLaunchParams::args should point to an array of N pointers. Each
+ *   pointer, from ::cudaLaunchParams::args[0] to ::cudaLaunchParams::args[N - 1],
+ *   points to the region of memory from which the actual parameter will be copied.
+ * - ::cudaLaunchParams::sharedMem is the dynamic shared-memory size per thread block in bytes.
+ *   This must match across all kernels launched.
+ * - ::cudaLaunchParams::stream is the handle to the stream to perform the launch in. This cannot
+ *   be the NULL stream or ::cudaStreamLegacy or ::cudaStreamPerThread.
+ *
+ * By default, the kernel won't begin execution on any GPU until all prior work in all the specified
+ * streams has completed. This behavior can be overridden by specifying the flag
+ * ::cudaCooperativeLaunchMultiDeviceNoPreSync.
When this flag is specified, each kernel + * will only wait for prior work in the stream corresponding to that GPU to complete before it begins + * execution. + * + * Similarly, by default, any subsequent work pushed in any of the specified streams will not begin + * execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying + * the flag ::cudaCooperativeLaunchMultiDeviceNoPostSync. When this flag is specified, + * any subsequent work pushed in any of the specified streams will only wait for the kernel launched + * on the GPU corresponding to that stream to complete before it begins execution. + * + * \param launchParamsList - List of launch parameters, one per device + * \param numDevices - Size of the \p launchParamsList array + * \param flags - Flags to control launch behavior + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorCooperativeLaunchTooLarge, + * ::cudaErrorSharedObjectInitFailed + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaLaunchCooperativeKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C++ API)", + * ::cudaLaunchCooperativeKernel, + * ::cuLaunchCooperativeKernelMultiDevice + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernelMultiDevice(struct cudaLaunchParams *launchParamsList, unsigned int numDevices, unsigned int flags __dv(0)); + +/** + * \brief Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache configuration + * for the function specified via \p func. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute \p func. + * + * \p func is a device function symbol and must be declared as a + * \c __global__ function. If the specified function does not exist, + * then ::cudaErrorInvalidDeviceFunction is returned. For templated functions, + * pass the function symbol as follows: func_name + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory + * + * \param func - Device function symbol + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_string_api_deprecation2 + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)", + * ::cuFuncSetCacheConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaFuncSetCacheConfig(const void *func, enum cudaFuncCache cacheConfig); + +/** + * \brief Sets the shared memory configuration for a device function + * + * On devices with configurable shared memory banks, this function will + * force all subsequent launches of the specified device function to have + * the given shared memory bank size configuration. On any given launch of the + * function, the shared memory configuration of the device will be temporarily + * changed if needed to suit the function's preferred configuration. Changes in + * shared memory configuration between subsequent launches of functions, + * may introduce a device side synchronization point. + * + * Any per-function setting of shared memory bank size set via + * ::cudaFuncSetSharedMemConfig will override the device wide setting set by + * ::cudaDeviceSetSharedMemConfig. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * For templated functions, pass the function symbol as follows: + * func_name + * + * The supported bank configurations are: + * - ::cudaSharedMemBankSizeDefault: use the device's shared memory configuration + * when launching this function. + * - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be + * four bytes natively when launching this function. + * - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be eight + * bytes natively when launching this function. 
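+ *
+ * For illustration only, a minimal sketch (the kernel myKernel is a placeholder):
+ * \code
+   __global__ void myKernel(double *data);
+
+   // Ask for eight-byte shared memory banks before launching a kernel that
+   // mostly accesses double-precision values in shared memory.
+   cudaFuncSetSharedMemConfig((const void *)myKernel, cudaSharedMemBankSizeEightByte);
+ * \endcode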
+ * + * \param func - Device function symbol + * \param config - Requested shared memory configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_string_api_deprecation2 + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceSetSharedMemConfig, + * ::cudaDeviceGetSharedMemConfig, + * ::cudaDeviceSetCacheConfig, + * ::cudaDeviceGetCacheConfig, + * ::cudaFuncSetCacheConfig, + * ::cuFuncSetSharedMemConfig + */ +extern __host__ cudaError_t CUDARTAPI cudaFuncSetSharedMemConfig(const void *func, enum cudaSharedMemConfig config); + +/** + * \brief Find out attributes for a given function + * + * This function obtains the attributes of a function specified via \p func. + * \p func is a device function symbol and must be declared as a + * \c __global__ function. The fetched attributes are placed in \p attr. + * If the specified function does not exist, then + * ::cudaErrorInvalidDeviceFunction is returned. For templated functions, pass + * the function symbol as follows: func_name + * + * Note that some function attributes such as + * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock" + * may vary based on the device that is currently being used. + * + * \param attr - Return pointer to function's attributes + * \param func - Device function symbol + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_string_api_deprecation2 + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)", + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)", + * ::cuFuncGetAttribute + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func); + + +/** + * \brief Set attributes for a given function + * + * This function sets the attributes of a function specified via \p func. + * The parameter \p func must be a pointer to a function that executes + * on the device. The parameter specified by \p func must be declared as a \p __global__ + * function. The enumeration defined by \p attr is set to the value defined by \p value. + * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * If the specified attribute cannot be written, or if the value is incorrect, + * then ::cudaErrorInvalidValue is returned. + * + * Valid values for \p attr are: + * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute ::sharedSizeBytes + * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture. + * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. 
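+ *
+ * For illustration only, a minimal sketch that requests a larger dynamic shared
+ * memory allocation (the kernel myKernel and the 64 KB figure are placeholders;
+ * the achievable maximum is device dependent):
+ * \code
+   __global__ void myKernel(float *data);
+
+   void configureAndLaunch(float *data, cudaStream_t stream)
+   {
+       size_t smemBytes = 64 * 1024;   // requested dynamic shared memory per block
+       cudaFuncSetAttribute((const void *)myKernel,
+                            cudaFuncAttributeMaxDynamicSharedMemorySize,
+                            (int)smemBytes);
+       myKernel<<<dim3(128), dim3(256), smemBytes, stream>>>(data);
+   }
+ * \endcode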
+ * + * \param func - Function to get attributes of + * \param attr - Attribute to set + * \param value - Value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncSetAttribute(const void *func, enum cudaFuncAttribute attr, int value); + +/** + * \brief Converts a double argument to be executed on a device + * + * \param d - Double to convert + * + * \deprecated This function is deprecated as of CUDA 7.5 + * + * Converts the double value of \p d to an internal float representation if + * the device does not support double arithmetic. If the device does natively + * support doubles, then this function does nothing. + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForHost + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaSetDoubleForDevice(double *d); + +/** + * \brief Converts a double argument after execution on a device + * + * \deprecated This function is deprecated as of CUDA 7.5 + * + * Converts the double value of \p d from a potentially internal float + * representation if the device does not support double arithmetic. If the + * device does natively support doubles, then this function does nothing. + * + * \param d - Double to convert + * + * \return + * ::cudaSuccess + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaSetDoubleForHost(double *d); + +/** + * \brief Enqueues a host function call in a stream + * + * Enqueues a host function to run in a stream. The function will be called + * after currently enqueued work and will block work added after it. + * + * The host function must not make any CUDA API calls. Attempting to use a + * CUDA API may result in ::cudaErrorNotPermitted, but this is not required. + * The host function must not perform any synchronization that may depend on + * outstanding CUDA work not mandated to run earlier. Host functions without a + * mandated order (such as in independent streams) execute in undefined order + * and may be serialized. + * + * For the purposes of Unified Memory, execution makes a number of guarantees: + *
+ *
+ * - The stream is considered idle for the duration of the function's
+ * execution. Thus, for example, the function may always use memory attached
+ * to the stream it was enqueued in.
+ *
+ * - The start of execution of the function has the same effect as
+ * synchronizing an event recorded in the same stream immediately prior to
+ * the function. It thus synchronizes streams which have been "joined"
+ * prior to the function.
+ *
+ * - Adding device work to any stream does not have the effect of making
+ * the stream active until all preceding host functions and stream callbacks
+ * have executed. Thus, for example, a function might use global attached
+ * memory even if work has been added to another stream, if the work has
+ * been ordered behind the function call with an event.
+ *
+ * - Completion of the function does not cause a stream to become
+ * active except as described above. The stream will remain idle
+ * if no device work follows the function, and will remain idle across
+ * consecutive host functions or stream callbacks without device work in
+ * between. Thus, for example, stream synchronization can be done by
+ * signaling from a host function at the end of the stream.
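+ *
+ * A minimal usage sketch (\p myHostFn, \p myData and \p stream are placeholder
+ * names for a user-defined host function, its argument and an existing stream):
+ * \code
+ * // Placeholder host callback; must not call any CUDA API functions.
+ * void CUDART_CB myHostFn(void *userData)
+ * {
+ *     // ... host-side work using userData ...
+ * }
+ *
+ * // Runs after all work previously enqueued in stream has completed.
+ * cudaLaunchHostFunc(stream, myHostFn, myData);
+ * \endcode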
+ *
+ * Note that, in contrast to ::cuStreamAddCallback, the function will not be
+ * called in the event of an error in the CUDA context.
+ *
+ * \param hStream - Stream to enqueue function call in
+ * \param fn - The function to call once preceding stream operations are complete
+ * \param userData - User-specified data to be passed to the function
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidResourceHandle,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorNotSupported
+ * \note_null_stream
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaStreamCreate,
+ * ::cudaStreamQuery,
+ * ::cudaStreamSynchronize,
+ * ::cudaStreamWaitEvent,
+ * ::cudaStreamDestroy,
+ * ::cudaMallocManaged,
+ * ::cudaStreamAttachMemAsync,
+ * ::cudaStreamAddCallback,
+ * ::cuLaunchHostFunc
+ */
+extern __host__ cudaError_t CUDARTAPI cudaLaunchHostFunc(cudaStream_t stream, cudaHostFn_t fn, void *userData);
+
+/** @} */ /* END CUDART_EXECUTION */
+
+/**
+ * \defgroup CUDART_OCCUPANCY Occupancy
+ *
+ * ___MANBRIEF___ occupancy calculation functions of the CUDA runtime API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the occupancy calculation functions of the CUDA runtime
+ * application programming interface.
+ *
+ * Besides the occupancy calculator functions
+ * (\ref ::cudaOccupancyMaxActiveBlocksPerMultiprocessor and \ref ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags),
+ * there are also C++ only occupancy-based launch configuration functions documented in the
+ * \ref CUDART_HIGHLEVEL "C++ API Routines" module.
+ *
+ * See
+ * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)",
+ * \ref ::cudaOccupancyAvailableDynamicSMemPerBlock(size_t*, T, int, int) "cudaOccupancyAvailableDynamicSMemPerBlock (C++ API)"
+ *
+ * @{
+ */
+
+/**
+ * \brief Returns occupancy for a device function
+ *
+ * Returns in \p *numBlocks the maximum number of active blocks per
+ * streaming multiprocessor for the device function.
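+ *
+ * A minimal usage sketch (\p MyKernel is a placeholder kernel launched with
+ * 256 threads per block and no dynamic shared memory):
+ * \code
+ * int numBlocks = 0;
+ * cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,
+ *                                               (const void *)MyKernel,
+ *                                               256,  // block size
+ *                                               0);   // dynamic shared memory per block
+ * \endcode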
+ * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calculated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, + * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)", + * \ref ::cudaOccupancyAvailableDynamicSMemPerBlock(size_t*, T, int, int) "cudaOccupancyAvailableDynamicSMemPerBlock (C++ API)", + * ::cuOccupancyMaxActiveBlocksPerMultiprocessor + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSMemSize); + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM. + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the block + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, + * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)", + * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)", + * ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyAvailableDynamicSMemPerBlock(size_t *dynamicSmemSize, const void *func, int numBlocks, int blockSize); + +/** + * \brief Returns occupancy for a device function with the specified flags + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * The \p flags parameter controls how special cases are handled. 
Valid flags include:
+ *
+ * - ::cudaOccupancyDefault: keeps the default behavior as
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
+ *
+ * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior
+ * on platforms where global caching affects occupancy. On such platforms, if caching
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
+ * Setting this flag makes the occupancy calculator return 0 in such cases.
+ * More information about this feature can be found in the "Unified L1/Texture Cache"
+ * section of the Maxwell tuning guide.
+ *
+ * \param numBlocks - Returned occupancy
+ * \param func - Kernel function for which occupancy is calculated
+ * \param blockSize - Block size the kernel is intended to be launched with
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
+ * \param flags - Requested behavior for the occupancy calculator
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidDevice,
+ * ::cudaErrorInvalidDeviceFunction,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown,
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor,
+ * \ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) "cudaOccupancyMaxPotentialBlockSize (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) "cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)",
+ * \ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)",
+ * \ref ::cudaOccupancyAvailableDynamicSMemPerBlock(size_t*, T, int, int) "cudaOccupancyAvailableDynamicSMemPerBlock (C++ API)",
+ * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSMemSize, unsigned int flags);
+
+/**
+ * \brief Given the kernel function (\p func) and launch configuration
+ * (\p config), return the maximum cluster size in \p *clusterSize.
+ *
+ * The cluster dimensions in \p config are ignored. If \p func has a required
+ * cluster size set (see ::cudaFuncGetAttributes), \p *clusterSize will reflect
+ * the required cluster size.
+ *
+ * By default this function will always return a value that's portable on
+ * future hardware. A higher value may be returned if the kernel function
+ * allows non-portable cluster sizes.
+ *
+ * This function will respect the compile time launch bounds.
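+ *
+ * A minimal usage sketch (\p MyKernel is a placeholder kernel; the cluster
+ * dimensions in the configuration are ignored by this query):
+ * \code
+ * cudaLaunchConfig_t config = {0};
+ * config.gridDim  = dim3(1024, 1, 1);
+ * config.blockDim = dim3(128, 1, 1);
+ *
+ * int clusterSize = 0;
+ * cudaOccupancyMaxPotentialClusterSize(&clusterSize, (const void *)MyKernel, &config);
+ * \endcode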
+ * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaFuncGetAttributes + * \ref ::cudaOccupancyMaxPotentialClusterSize(int*, T, const cudaLaunchConfig_t*) "cudaOccupancyMaxPotentialClusterSize (C++ API)", + * ::cuOccupancyMaxPotentialClusterSize + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxPotentialClusterSize(int *clusterSize, const void *func, const cudaLaunchConfig_t *launchConfig); + + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. + * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes), the cluster size from config must either be + * unspecified or match the required size. + * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidClusterSize, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + * \ref ::cudaOccupancyMaxActiveClusters(int*, T, const cudaLaunchConfig_t*) "cudaOccupancyMaxActiveClusters (C++ API)", + * ::cuOccupancyMaxActiveClusters + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveClusters(int *numClusters, const void *func, const cudaLaunchConfig_t *launchConfig); +/** @} */ /* END CUDA_OCCUPANCY */ + +/** + * \defgroup CUDART_MEMORY Memory Management + * + * ___MANBRIEF___ memory management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the memory management functions of the CUDA runtime + * application programming interface. + * + * Some functions have overloaded C++ API template versions documented separately in the + * \ref CUDART_HIGHLEVEL "C++ API Routines" module. + * + * @{ + */ + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p size bytes of managed memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::cudaErrorNotSupported is returned. Support + * for managed memory can be queried using the device attribute + * ::cudaDevAttrManagedMemory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p size + * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. 
The pointer is valid on the CPU and on all GPUs in the system that support
+ * managed memory. All accesses to this pointer must obey the Unified Memory
+ * programming model.
+ *
+ * \p flags specifies the default stream association for this allocation.
+ * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The
+ * default value for \p flags is ::cudaMemAttachGlobal.
+ * If ::cudaMemAttachGlobal is specified, then this memory is accessible from
+ * any stream on any device. If ::cudaMemAttachHost is specified, then the
+ * allocation should not be accessed from devices that have a zero value for the
+ * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to
+ * ::cudaStreamAttachMemAsync will be required to enable access on such devices.
+ *
+ * If the association is later changed via ::cudaStreamAttachMemAsync to
+ * a single stream, the default association, as specified during ::cudaMallocManaged,
+ * is restored when that stream is destroyed. For __managed__ variables, the
+ * default association is always ::cudaMemAttachGlobal. Note that destroying a
+ * stream is an asynchronous operation, and as a result, the change to default
+ * association won't happen until all work in the stream has completed.
+ *
+ * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree.
+ *
+ * Device memory oversubscription is possible for GPUs that have a non-zero value for the
+ * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on
+ * such GPUs may be evicted from device memory to host memory at any time by the Unified
+ * Memory driver in order to make room for other allocations.
+ *
+ * In a multi-GPU system where all GPUs have a non-zero value for the device attribute
+ * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this
+ * API returns and instead may be populated on access. In such systems, managed memory can
+ * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to
+ * maintain data locality and prevent excessive page faults to the extent possible. The application
+ * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application
+ * can also explicitly migrate memory to a desired processor's memory via
+ * ::cudaMemPrefetchAsync.
+ *
+ * In a multi-GPU system where all of the GPUs have a zero value for the device attribute
+ * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support
+ * with each other, the physical storage for managed memory is created on the GPU which is active
+ * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced
+ * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate
+ * memory among such GPUs.
+ *
+ * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and
+ * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess
+ * is zero for at least one of those GPUs, the location chosen for physical storage of managed
+ * memory is system-dependent.
+ * - On Linux, the location chosen will be device memory as long as the current set of active
+ * contexts is on devices that either have peer-to-peer support with each other or have a
+ * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
+ * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. + * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero + * value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all devices used in + * that process that support managed memory have to be peer-to-peer compatible + * with each other. The error ::cudaErrorInvalidDevice will be returned if a device + * that supports managed memory is used and it is not peer-to-peer compatible with + * any of the other managed memory supporting devices that were previously used in + * that process, even if ::cudaDeviceReset has been called on those devices. These + * environment variables are described in the CUDA programming guide under the + * "CUDA environment variables" section. + * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal) + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync, + * ::cuMemAllocManaged + */ +#if defined(__cplusplus) +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMallocManaged(void **devPtr, size_t size, unsigned int flags = cudaMemAttachGlobal); +#else +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMallocManaged(void **devPtr, size_t size, unsigned int flags); +#endif + +/** + * \brief Allocate memory on the device + * + * Allocates \p size bytes of linear memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. The allocated memory is + * suitably aligned for any kind of variable. The memory is not cleared. + * ::cudaMalloc() returns ::cudaErrorMemoryAllocation in case of failure. + * + * The device version of ::cudaFree cannot be used with a \p *devPtr + * allocated using the host API, and vice versa. 
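+ *
+ * A minimal usage sketch (host code; \p n is a placeholder element count):
+ * \code
+ * float *d_buf = NULL;
+ * cudaError_t err = cudaMalloc((void **)&d_buf, n * sizeof(float));
+ * if (err == cudaSuccess) {
+ *     // ... use d_buf in kernels or memory copies ...
+ *     cudaFree(d_buf);
+ * }
+ * \endcode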
+ * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuMemAlloc + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size); + +/** + * \brief Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy*(). Since the memory can be accessed directly by the device, + * it can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * memory with ::cudaMallocHost() may degrade system performance, since it + * reduces the amount of memory available to the system for paging. As a + * result, this function is best used sparingly to allocate staging areas for + * data exchange between host and device. + * + * \param ptr - Pointer to allocated host memory + * \param size - Requested allocation size in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaMallocArray, ::cudaMalloc3D, + * ::cudaMalloc3DArray, ::cudaHostAlloc, ::cudaFree, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t, unsigned int) "cudaMallocHost (C++ API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuMemAllocHost + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocHost(void **ptr, size_t size); + +/** + * \brief Allocates pitched memory on the device + * + * Allocates at least \p width (in bytes) * \p height bytes of linear memory + * on the device and returns in \p *devPtr a pointer to the allocated memory. + * The function may pad the allocation to ensure that corresponding pointers + * in any given row will continue to meet the alignment requirements for + * coalescing as the address is updated from row to row. The pitch returned in + * \p *pitch by ::cudaMallocPitch() is the width in bytes of the allocation. + * The intended usage of \p pitch is as a separate parameter of the allocation, + * used to compute addresses within the 2D array. Given the row and column of + * an array element of type \p T, the address is computed as: + * \code + T* pElement = (T*)((char*)BaseAddress + Row * pitch) + Column; + \endcode + * + * For allocations of 2D arrays, it is recommended that programmers consider + * performing pitch allocations using ::cudaMallocPitch(). Due to pitch + * alignment restrictions in the hardware, this is especially true if the + * application will be performing 2D memory copies between different regions + * of device memory (whether linear memory or CUDA arrays). 
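+ *
+ * A minimal usage sketch (\p width and \p height are placeholder dimensions of
+ * a 2D array of floats), using the addressing formula shown above:
+ * \code
+ * float *d_ptr = NULL;
+ * size_t pitch = 0;
+ * cudaMallocPitch((void **)&d_ptr, &pitch, width * sizeof(float), height);
+ *
+ * // Address of element (row, column); pitch is expressed in bytes:
+ * // float *pElement = (float *)((char *)d_ptr + row * pitch) + column;
+ * \endcode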
+ * + * \param devPtr - Pointer to allocated pitched device memory + * \param pitch - Pitch for allocation + * \param width - Requested pitched allocation width (in bytes) + * \param height - Requested pitched allocation height + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray, + * ::cudaHostAlloc, + * ::cuMemAllocPitch + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocPitch(void **devPtr, size_t *pitch, size_t width, size_t height); + +/** + * \brief Allocate an array on the device + * + * Allocates a CUDA array according to the ::cudaChannelFormatDesc structure + * \p desc and returns a handle to the new CUDA array in \p *array. + * + * The ::cudaChannelFormatDesc is defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + \endcode + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation + * - ::cudaArraySurfaceLoadStore: Allocates an array that can be read from or written to using a surface reference + * - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the array. + * - ::cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array + * can later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. + * The physical backing memory must be allocated via ::cuMemCreate. + * - ::cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can + * later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. + * The physical backing memory must be allocated via ::cuMemCreate. + * + * \p width and \p height must meet certain size requirements. See ::cudaMalloc3DArray() for more details. + * + * \param array - Pointer to allocated array in device memory + * \param desc - Requested channel format + * \param width - Requested array allocation width + * \param height - Requested array allocation height + * \param flags - Requested properties of allocated array + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray, + * ::cudaHostAlloc, + * ::cuArrayCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocArray(cudaArray_t *array, const struct cudaChannelFormatDesc *desc, size_t width, size_t height __dv(0), unsigned int flags __dv(0)); + +/** + * \brief Frees memory on the device + * + * Frees the memory space pointed to by \p devPtr, which must have been + * returned by a previous call to one of the following memory allocation APIs - + * ::cudaMalloc(), ::cudaMallocPitch(), ::cudaMallocManaged(), ::cudaMallocAsync(), + * ::cudaMallocFromPoolAsync(). 
+ * + * Note - This API will not perform any implicit synchronization when the pointer was + * allocated with ::cudaMallocAsync or ::cudaMallocFromPoolAsync. Callers must ensure + * that all accesses to the pointer have completed before invoking ::cudaFree. For + * best performance and memory reuse, users should use ::cudaFreeAsync to free memory + * allocated via the stream ordered memory allocator. + * + * If ::cudaFree(\p devPtr) has already been called before, + * an error is returned. If \p devPtr is 0, no operation is performed. + * ::cudaFree() returns ::cudaErrorValue in case of failure. + * + * The device version of ::cudaFree cannot be used with a \p *devPtr + * allocated using the host API, and vice versa. + * + * \param devPtr - Device pointer to memory to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaMallocManaged, ::cudaMallocArray, ::cudaFreeArray, ::cudaMallocAsync, ::cudaMallocFromPoolAsync + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaFreeAsync + * ::cudaHostAlloc, + * ::cuMemFree + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr); + +/** + * \brief Frees page-locked memory + * + * Frees the memory space pointed to by \p hostPtr, which must have been + * returned by a previous call to ::cudaMallocHost() or ::cudaHostAlloc(). + * + * \param ptr - Pointer to memory to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaHostAlloc, + * ::cuMemFreeHost + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeHost(void *ptr); + +/** + * \brief Frees an array on the device + * + * Frees the CUDA array \p array, which must have been returned by a + * previous call to ::cudaMallocArray(). If \p devPtr is 0, + * no operation is performed. + * + * \param array - Pointer to array to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuArrayDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeArray(cudaArray_t array); + +/** + * \brief Frees a mipmapped array on the device + * + * Frees the CUDA mipmapped array \p mipmappedArray, which must have been + * returned by a previous call to ::cudaMallocMipmappedArray(). If \p devPtr + * is 0, no operation is performed. + * + * \param mipmappedArray - Pointer to mipmapped array to free + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::cuMipmappedArrayDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeMipmappedArray(cudaMipmappedArray_t mipmappedArray); + + +/** + * \brief Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. 
The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it + * can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between host + * and device. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaHostAllocDefault: This flag's value is defined to be 0 and causes + * ::cudaHostAlloc() to emulate ::cudaMallocHost(). + * - ::cudaHostAllocPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space. + * The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). + * WC memory can be transferred across the PCI Express bus more quickly on some + * system configurations, but cannot be read efficiently by most CPUs. WC + * memory is a good option for buffers that will be written by the CPU and read + * by the device via mapped pinned memory or host->device transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * In order for the ::cudaHostAllocMapped flag to have any effect, the CUDA context + * must support the ::cudaDeviceMapHost flag, which can be checked via + * ::cudaGetDeviceFlags(). The ::cudaDeviceMapHost flag is implicitly set for + * contexts created via the runtime API. + * + * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices + * that do not support mapped pinned memory. The failure is deferred to + * ::cudaHostGetDevicePointer() because the memory may be mapped into other + * CUDA contexts via the ::cudaHostAllocPortable flag. + * + * Memory allocated by this function must be freed with ::cudaFreeHost(). + * + * \param pHost - Device pointer to allocated memory + * \param size - Requested allocation size in bytes + * \param flags - Requested properties of allocated memory + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, + * ::cudaGetDeviceFlags, + * ::cuMemHostAlloc + */ +extern __host__ cudaError_t CUDARTAPI cudaHostAlloc(void **pHost, size_t size, unsigned int flags); + +/** + * \brief Registers an existing host memory range for use by CUDA + * + * Page-locks the memory range specified by \p ptr and \p size and maps it + * for the device(s) as specified by \p flags. This memory range also is added + * to the same tracking mechanism as ::cudaHostAlloc() to automatically accelerate + * calls to functions such as ::cudaMemcpy(). 
Since the memory can be accessed + * directly by the device, it can be read or written with much higher bandwidth + * than pageable memory that has not been registered. Page-locking excessive + * amounts of memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to register staging areas for data exchange between + * host and device. + * + * On systems where ::pageableMemoryAccessUsesHostPageTables is true, ::cudaHostRegister + * will not page-lock the memory range specified by \p ptr but only populate + * unpopulated pages. + * + * ::cudaHostRegister is supported only on I/O coherent devices that have a non-zero + * value for the device attribute ::cudaDevAttrHostRegisterSupported. + * + * The \p flags parameter enables different options to be specified that + * affect the allocation, as follows. + * + * - ::cudaHostRegisterDefault: On a system with unified virtual addressing, + * the memory will be both mapped and portable. On a system with no unified + * virtual addressing, the memory will be neither mapped nor portable. + * + * - ::cudaHostRegisterPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * + * - ::cudaHostRegisterMapped: Maps the allocation into the CUDA address + * space. The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * + * - ::cudaHostRegisterIoMemory: The passed memory pointer is treated as + * pointing to some memory-mapped I/O space, e.g. belonging to a + * third-party PCIe device, and it will marked as non cache-coherent and + * contiguous. + * + * - ::cudaHostRegisterReadOnly: The passed memory pointer is treated as + * pointing to memory that is considered read-only by the device. On + * platforms without ::cudaDevAttrPageableMemoryAccessUsesHostPageTables, this + * flag is required in order to register memory mapped to the CPU as + * read-only. Support for the use of this flag can be queried from the device + * attribute cudaDeviceAttrReadOnlyHostRegisterSupported. Using this flag with + * a current context associated with a device that does not have this attribute + * set will cause ::cudaHostRegister to error with cudaErrorNotSupported. + * + * All of these flags are orthogonal to one another: a developer may page-lock + * memory that is portable or mapped with no restrictions. + * + * The CUDA context must have been created with the ::cudaMapHost flag in + * order for the ::cudaHostRegisterMapped flag to have any effect. + * + * The ::cudaHostRegisterMapped flag may be specified on CUDA contexts for + * devices that do not support mapped pinned memory. The failure is deferred + * to ::cudaHostGetDevicePointer() because the memory may be mapped into + * other CUDA contexts via the ::cudaHostRegisterPortable flag. + * + * For devices that have a non-zero value for the device attribute + * ::cudaDevAttrCanUseHostPointerForRegisteredMem, the memory + * can also be accessed from the device using the host pointer \p ptr. + * The device pointer returned by ::cudaHostGetDevicePointer() may or may not + * match the original host pointer \p ptr and depends on the devices visible to the + * application. If all devices visible to the application have a non-zero value for the + * device attribute, the device pointer returned by ::cudaHostGetDevicePointer() + * will match the original pointer \p ptr. 
If any device visible to the application + * has a zero value for the device attribute, the device pointer returned by + * ::cudaHostGetDevicePointer() will not match the original host pointer \p ptr, + * but it will be suitable for use on all devices provided Unified Virtual Addressing + * is enabled. In such systems, it is valid to access the memory using either pointer + * on devices that have a non-zero value for the device attribute. Note however that + * such devices should access the memory using only of the two pointers and not both. + * + * The memory page-locked by this function must be unregistered with ::cudaHostUnregister(). + * + * \param ptr - Host pointer to memory to page-lock + * \param size - Size in bytes of the address range to page-lock in bytes + * \param flags - Flags for allocation request + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorHostMemoryAlreadyRegistered, + * ::cudaErrorNotSupported + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaHostUnregister, ::cudaHostGetFlags, ::cudaHostGetDevicePointer, + * ::cuMemHostRegister + */ +extern __host__ cudaError_t CUDARTAPI cudaHostRegister(void *ptr, size_t size, unsigned int flags); + +/** + * \brief Unregisters a memory range that was registered with cudaHostRegister + * + * Unmaps the memory range whose base address is specified by \p ptr, and makes + * it pageable again. + * + * The base address must be the same one specified to ::cudaHostRegister(). + * + * \param ptr - Host pointer to memory to unregister + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorHostMemoryNotRegistered + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaHostUnregister, + * ::cuMemHostUnregister + */ +extern __host__ cudaError_t CUDARTAPI cudaHostUnregister(void *ptr); + +/** + * \brief Passes back device pointer of mapped host memory allocated by + * cudaHostAlloc or registered by cudaHostRegister + * + * Passes back the device pointer corresponding to the mapped, pinned host + * buffer allocated by ::cudaHostAlloc() or registered by ::cudaHostRegister(). + * + * ::cudaHostGetDevicePointer() will fail if the ::cudaDeviceMapHost flag was + * not specified before deferred context creation occurred, or if called on a + * device that does not support mapped, pinned memory. + * + * For devices that have a non-zero value for the device attribute + * ::cudaDevAttrCanUseHostPointerForRegisteredMem, the memory + * can also be accessed from the device using the host pointer \p pHost. + * The device pointer returned by ::cudaHostGetDevicePointer() may or may not + * match the original host pointer \p pHost and depends on the devices visible to the + * application. If all devices visible to the application have a non-zero value for the + * device attribute, the device pointer returned by ::cudaHostGetDevicePointer() + * will match the original pointer \p pHost. If any device visible to the application + * has a zero value for the device attribute, the device pointer returned by + * ::cudaHostGetDevicePointer() will not match the original host pointer \p pHost, + * but it will be suitable for use on all devices provided Unified Virtual Addressing + * is enabled. In such systems, it is valid to access the memory using either pointer + * on devices that have a non-zero value for the device attribute. Note however that + * such devices should access the memory using only of the two pointers and not both. 
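+ *
+ * A minimal usage sketch (host code; \p bytes is a placeholder size), mapping a
+ * page-locked buffer into the device address space:
+ * \code
+ * void *h_ptr = NULL;
+ * void *d_ptr = NULL;
+ * cudaHostAlloc(&h_ptr, bytes, cudaHostAllocMapped);
+ * cudaHostGetDevicePointer(&d_ptr, h_ptr, 0);
+ * // d_ptr may now be passed to kernels while h_ptr remains usable on the host.
+ * cudaFreeHost(h_ptr);
+ * \endcode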
+ * + * \p flags provides for future releases. For now, it must be set to 0. + * + * \param pDevice - Returned device pointer for mapped memory + * \param pHost - Requested host pointer mapping + * \param flags - Flags for extensions (must be 0 for now) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, ::cudaHostAlloc, + * ::cuMemHostGetDevicePointer + */ +extern __host__ cudaError_t CUDARTAPI cudaHostGetDevicePointer(void **pDevice, void *pHost, unsigned int flags); + +/** + * \brief Passes back flags used to allocate pinned host memory allocated by + * cudaHostAlloc + * + * ::cudaHostGetFlags() will fail if the input pointer does not + * reside in an address range allocated by ::cudaHostAlloc(). + * + * \param pFlags - Returned flags word + * \param pHost - Host pointer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaHostAlloc, + * ::cuMemHostGetFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaHostGetFlags(unsigned int *pFlags, void *pHost); + +/** + * \brief Allocates logical 1D, 2D, or 3D memory objects on the device + * + * Allocates at least \p width * \p height * \p depth bytes of linear memory + * on the device and returns a ::cudaPitchedPtr in which \p ptr is a pointer + * to the allocated memory. The function may pad the allocation to ensure + * hardware alignment requirements are met. The pitch returned in the \p pitch + * field of \p pitchedDevPtr is the width in bytes of the allocation. + * + * The returned ::cudaPitchedPtr contains additional fields \p xsize and + * \p ysize, the logical width and height of the allocation, which are + * equivalent to the \p width and \p height \p extent parameters provided by + * the programmer during allocation. + * + * For allocations of 2D and 3D objects, it is highly recommended that + * programmers perform allocations using ::cudaMalloc3D() or + * ::cudaMallocPitch(). Due to alignment restrictions in the hardware, this is + * especially true if the application will be performing memory copies + * involving 2D or 3D objects (whether linear memory or CUDA arrays). + * + * \param pitchedDevPtr - Pointer to allocated pitched device memory + * \param extent - Requested allocation size (\p width field in bytes) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMemcpy3D, ::cudaMemset3D, + * ::cudaMalloc3DArray, ::cudaMallocArray, ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::make_cudaPitchedPtr, ::make_cudaExtent, + * ::cuMemAllocPitch + */ +extern __host__ cudaError_t CUDARTAPI cudaMalloc3D(struct cudaPitchedPtr* pitchedDevPtr, struct cudaExtent extent); + +/** + * \brief Allocate an array on the device + * + * Allocates a CUDA array according to the ::cudaChannelFormatDesc structure + * \p desc and returns a handle to the new CUDA array in \p *array. + * + * The ::cudaChannelFormatDesc is defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + \endcode + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. 
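+ *
+ * A minimal sketch of creating such a descriptor for a single-channel float
+ * format with the C API helper ::cudaCreateChannelDesc:
+ * \code
+ * cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
+ * \endcode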
+ * + * ::cudaMalloc3DArray() can allocate the following: + * + * - A 1D array is allocated if the height and depth extents are both zero. + * - A 2D array is allocated if only the depth extent is zero. + * - A 3D array is allocated if all three extents are non-zero. + * - A 1D layered CUDA array is allocated if only the height extent is zero and + * the cudaArrayLayered flag is set. Each layer is a 1D array. The number of layers is + * determined by the depth extent. + * - A 2D layered CUDA array is allocated if all three extents are non-zero and + * the cudaArrayLayered flag is set. Each layer is a 2D array. The number of layers is + * determined by the depth extent. + * - A cubemap CUDA array is allocated if all three extents are non-zero and the + * cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. A cubemap is + * a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. + * The order of the six layers in memory is the same as that listed in ::cudaGraphicsCubeFace. + * - A cubemap layered CUDA array is allocated if all three extents are non-zero, and both, + * cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be + * a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists + * of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form + * the second cubemap, and so on. + * + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation + * - ::cudaArrayLayered: Allocates a layered CUDA array, with the depth extent indicating the number of layers + * - ::cudaArrayCubemap: Allocates a cubemap CUDA array. Width must be equal to height, and depth must be six. + * If the cudaArrayLayered flag is also set, depth must be a multiple of six. + * - ::cudaArraySurfaceLoadStore: Allocates a CUDA array that could be read from or written to using a surface + * reference. + * - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA + * array. Texture gather can only be performed on 2D CUDA arrays. + * - ::cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array + * can later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. This flag can only be used for + * creating 2D, 3D or 2D layered sparse CUDA arrays. The physical backing memory must be allocated via ::cuMemCreate. + * - ::cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can + * later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. The physical backing memory must be allocated + * via ::cuMemCreate. + * + * The width, height and depth extents must meet certain size requirements as listed in the following table. + * All values are specified in elements. + * + * Note that 2D CUDA arrays have different size requirements if the ::cudaArrayTextureGather flag is set. In that + * case, the valid range for (width, height, depth) is ((1,maxTexture2DGather[0]), (1,maxTexture2DGather[1]), 0). 
+ *
+ * \xmlonly
+ * <table>
+ * <tr>
+ *   <th>CUDA array type</th>
+ *   <th>Valid extents that must always be met {(width range in elements),
+ *       (height range), (depth range)}</th>
+ *   <th>Valid extents with cudaArraySurfaceLoadStore set {(width range in
+ *       elements), (height range), (depth range)}</th>
+ * </tr>
+ * <tr>
+ *   <td>1D</td>
+ *   <td>{ (1,maxTexture1D), 0, 0 }</td>
+ *   <td>{ (1,maxSurface1D), 0, 0 }</td>
+ * </tr>
+ * <tr>
+ *   <td>2D</td>
+ *   <td>{ (1,maxTexture2D[0]), (1,maxTexture2D[1]), 0 }</td>
+ *   <td>{ (1,maxSurface2D[0]), (1,maxSurface2D[1]), 0 }</td>
+ * </tr>
+ * <tr>
+ *   <td>3D</td>
+ *   <td>{ (1,maxTexture3D[0]), (1,maxTexture3D[1]), (1,maxTexture3D[2]) }
+ *       OR { (1,maxTexture3DAlt[0]), (1,maxTexture3DAlt[1]), (1,maxTexture3DAlt[2]) }</td>
+ *   <td>{ (1,maxSurface3D[0]), (1,maxSurface3D[1]), (1,maxSurface3D[2]) }</td>
+ * </tr>
+ * <tr>
+ *   <td>1D Layered</td>
+ *   <td>{ (1,maxTexture1DLayered[0]), 0, (1,maxTexture1DLayered[1]) }</td>
+ *   <td>{ (1,maxSurface1DLayered[0]), 0, (1,maxSurface1DLayered[1]) }</td>
+ * </tr>
+ * <tr>
+ *   <td>2D Layered</td>
+ *   <td>{ (1,maxTexture2DLayered[0]), (1,maxTexture2DLayered[1]), (1,maxTexture2DLayered[2]) }</td>
+ *   <td>{ (1,maxSurface2DLayered[0]), (1,maxSurface2DLayered[1]), (1,maxSurface2DLayered[2]) }</td>
+ * </tr>
+ * <tr>
+ *   <td>Cubemap</td>
+ *   <td>{ (1,maxTextureCubemap), (1,maxTextureCubemap), 6 }</td>
+ *   <td>{ (1,maxSurfaceCubemap), (1,maxSurfaceCubemap), 6 }</td>
+ * </tr>
+ * <tr>
+ *   <td>Cubemap Layered</td>
+ *   <td>{ (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[1]) }</td>
+ *   <td>{ (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[1]) }</td>
+ * </tr>
+ * </table>
+ * \endxmlonly + * + * \param array - Pointer to allocated array in device memory + * \param desc - Requested channel format + * \param extent - Requested allocation size (\p width field in elements) + * \param flags - Flags for extensions + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::make_cudaExtent, + * ::cuArray3DCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMalloc3DArray(cudaArray_t *array, const struct cudaChannelFormatDesc* desc, struct cudaExtent extent, unsigned int flags __dv(0)); + +/** + * \brief Allocate a mipmapped array on the device + * + * Allocates a CUDA mipmapped array according to the ::cudaChannelFormatDesc structure + * \p desc and returns a handle to the new CUDA mipmapped array in \p *mipmappedArray. + * \p numLevels specifies the number of mipmap levels to be allocated. This value is + * clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]. + * + * The ::cudaChannelFormatDesc is defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + \endcode + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. + * + * ::cudaMallocMipmappedArray() can allocate the following: + * + * - A 1D mipmapped array is allocated if the height and depth extents are both zero. + * - A 2D mipmapped array is allocated if only the depth extent is zero. + * - A 3D mipmapped array is allocated if all three extents are non-zero. + * - A 1D layered CUDA mipmapped array is allocated if only the height extent is zero and + * the cudaArrayLayered flag is set. Each layer is a 1D mipmapped array. The number of layers is + * determined by the depth extent. + * - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and + * the cudaArrayLayered flag is set. Each layer is a 2D mipmapped array. The number of layers is + * determined by the depth extent. + * - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the + * cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. + * The order of the six layers in memory is the same as that listed in ::cudaGraphicsCubeFace. + * - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, and both, + * cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be + * a multiple of six. A cubemap layered CUDA mipmapped array is a special type of 2D layered CUDA mipmapped + * array that consists of a collection of cubemap mipmapped arrays. The first six layers represent the + * first cubemap mipmapped array, the next six layers form the second cubemap mipmapped array, and so on. + * + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default mipmapped array allocation + * - ::cudaArrayLayered: Allocates a layered CUDA mipmapped array, with the depth extent indicating the number of layers + * - ::cudaArrayCubemap: Allocates a cubemap CUDA mipmapped array. Width must be equal to height, and depth must be six. 
+ * If the cudaArrayLayered flag is also set, depth must be a multiple of six.
+ * - ::cudaArraySurfaceLoadStore: This flag indicates that individual mipmap levels of the CUDA mipmapped array
+ * will be read from or written to using a surface reference.
+ * - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA
+ * array. Texture gather can only be performed on 2D CUDA mipmapped arrays, and the gather operations are
+ * performed only on the most detailed mipmap level.
+ * - ::cudaArraySparse: Allocates a CUDA mipmapped array without physical backing memory. The subregions within this sparse array
+ * can later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. This flag can only be used for creating
+ * 2D, 3D or 2D layered sparse CUDA mipmapped arrays. The physical backing memory must be allocated via ::cuMemCreate.
+ * - ::cudaArrayDeferredMapping: Allocates a CUDA mipmapped array without physical backing memory. The entire array can
+ * later be mapped onto a physical memory allocation by calling ::cuMemMapArrayAsync. The physical backing memory must be allocated
+ * via ::cuMemCreate.
+ *
+ * The width, height and depth extents must meet certain size requirements as listed in the following table.
+ * All values are specified in elements.
+ *
+ * \xmlonly
+ * <table>
+ * <tr>
+ *   <th>CUDA array type</th>
+ *   <th>Valid extents that must always be met {(width range in elements),
+ *       (height range), (depth range)}</th>
+ *   <th>Valid extents with cudaArraySurfaceLoadStore set {(width range in
+ *       elements), (height range), (depth range)}</th>
+ * </tr>
+ * <tr>
+ *   <td>1D</td>
+ *   <td>{ (1,maxTexture1DMipmap), 0, 0 }</td>
+ *   <td>{ (1,maxSurface1D), 0, 0 }</td>
+ * </tr>
+ * <tr>
+ *   <td>2D</td>
+ *   <td>{ (1,maxTexture2DMipmap[0]), (1,maxTexture2DMipmap[1]), 0 }</td>
+ *   <td>{ (1,maxSurface2D[0]), (1,maxSurface2D[1]), 0 }</td>
+ * </tr>
+ * <tr>
+ *   <td>3D</td>
+ *   <td>{ (1,maxTexture3D[0]), (1,maxTexture3D[1]), (1,maxTexture3D[2]) }
+ *       OR { (1,maxTexture3DAlt[0]), (1,maxTexture3DAlt[1]), (1,maxTexture3DAlt[2]) }</td>
+ *   <td>{ (1,maxSurface3D[0]), (1,maxSurface3D[1]), (1,maxSurface3D[2]) }</td>
+ * </tr>
+ * <tr>
+ *   <td>1D Layered</td>
+ *   <td>{ (1,maxTexture1DLayered[0]), 0, (1,maxTexture1DLayered[1]) }</td>
+ *   <td>{ (1,maxSurface1DLayered[0]), 0, (1,maxSurface1DLayered[1]) }</td>
+ * </tr>
+ * <tr>
+ *   <td>2D Layered</td>
+ *   <td>{ (1,maxTexture2DLayered[0]), (1,maxTexture2DLayered[1]), (1,maxTexture2DLayered[2]) }</td>
+ *   <td>{ (1,maxSurface2DLayered[0]), (1,maxSurface2DLayered[1]), (1,maxSurface2DLayered[2]) }</td>
+ * </tr>
+ * <tr>
+ *   <td>Cubemap</td>
+ *   <td>{ (1,maxTextureCubemap), (1,maxTextureCubemap), 6 }</td>
+ *   <td>{ (1,maxSurfaceCubemap), (1,maxSurfaceCubemap), 6 }</td>
+ * </tr>
+ * <tr>
+ *   <td>Cubemap Layered</td>
+ *   <td>{ (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[1]) }</td>
+ *   <td>{ (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[1]) }</td>
+ * </tr>
+ * </table>
+ * \endxmlonly + * + * \param mipmappedArray - Pointer to allocated mipmapped array in device memory + * \param desc - Requested channel format + * \param extent - Requested allocation size (\p width field in elements) + * \param numLevels - Number of mipmap levels to allocate + * \param flags - Flags for extensions + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::make_cudaExtent, + * ::cuMipmappedArrayCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocMipmappedArray(cudaMipmappedArray_t *mipmappedArray, const struct cudaChannelFormatDesc* desc, struct cudaExtent extent, unsigned int numLevels, unsigned int flags __dv(0)); + +/** + * \brief Gets a mipmap level of a CUDA mipmapped array + * + * Returns in \p *levelArray a CUDA array that represents a single mipmap level + * of the CUDA mipmapped array \p mipmappedArray. + * + * If \p level is greater than the maximum number of levels in this mipmapped array, + * ::cudaErrorInvalidValue is returned. + * + * If \p mipmappedArray is NULL, + * ::cudaErrorInvalidResourceHandle is returned. + * + * \param levelArray - Returned mipmap level CUDA array + * \param mipmappedArray - CUDA mipmapped array + * \param level - Mipmap level + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, + * ::cudaFreeArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, + * ::make_cudaExtent, + * ::cuMipmappedArrayGetLevel + */ +extern __host__ cudaError_t CUDARTAPI cudaGetMipmappedArrayLevel(cudaArray_t *levelArray, cudaMipmappedArray_const_t mipmappedArray, unsigned int level); + +/** + * \brief Copies data between 3D objects + * +\code +struct cudaExtent { + size_t width; + size_t height; + size_t depth; +}; +struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d); + +struct cudaPos { + size_t x; + size_t y; + size_t z; +}; +struct cudaPos make_cudaPos(size_t x, size_t y, size_t z); + +struct cudaMemcpy3DParms { + cudaArray_t srcArray; + struct cudaPos srcPos; + struct cudaPitchedPtr srcPtr; + cudaArray_t dstArray; + struct cudaPos dstPos; + struct cudaPitchedPtr dstPtr; + struct cudaExtent extent; + enum cudaMemcpyKind kind; +}; +\endcode + * + * ::cudaMemcpy3D() copies data betwen two 3D objects. The source and + * destination objects may be in either host memory, device memory, or a CUDA + * array. The source, destination, extent, and kind of copy performed is + * specified by the ::cudaMemcpy3DParms struct which should be initialized to + * zero before use: +\code +cudaMemcpy3DParms myParms = {0}; +\endcode + * + * The struct passed to ::cudaMemcpy3D() must specify one of \p srcArray or + * \p srcPtr and one of \p dstArray or \p dstPtr. Passing more than one + * non-zero source or destination will cause ::cudaMemcpy3D() to return an + * error. + * + * The \p srcPos and \p dstPos fields are optional offsets into the source and + * destination objects and are defined in units of each object's elements. The + * element for a host or device pointer is assumed to be unsigned char. 
+ * + * The \p extent field defines the dimensions of the transferred area in + * elements. If a CUDA array is participating in the copy, the extent is + * defined in terms of that array's elements. If no CUDA array is + * participating in the copy then the extents are defined in elements of + * unsigned char. + * + * The \p kind field defines the direction of the copy. It must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * For ::cudaMemcpyHostToHost or ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost + * passed as kind and cudaArray type passed as source or destination, if the kind + * implies cudaArray type to be present on the host, ::cudaMemcpy3D() will + * disregard that implication and silently correct the kind based on the fact that + * cudaArray type can only be present on the device. + * + * If the source and destination are both arrays, ::cudaMemcpy3D() will return + * an error if they do not have the same element size. + * + * The source and destination object may not overlap. If overlapping source + * and destination objects are specified, undefined behavior will result. + * + * The source object must entirely contain the region defined by \p srcPos + * and \p extent. The destination object must entirely contain the region + * defined by \p dstPos and \p extent. + * + * ::cudaMemcpy3D() returns an error if the pitch of \p srcPtr or \p dstPtr + * exceeds the maximum allowed. The pitch of a ::cudaPitchedPtr allocated + * with ::cudaMalloc3D() will always be valid. + * + * \param p - 3D memory copy parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaMemset3D, ::cudaMemcpy3DAsync, + * ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::make_cudaExtent, ::make_cudaPos, + * ::cuMemcpy3D + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy3D(const struct cudaMemcpy3DParms *p); + +/** + * \brief Copies memory between devices + * + * Perform a 3D memory copy according to the parameters specified in + * \p p. See the definition of the ::cudaMemcpy3DPeerParms structure + * for documentation of its parameters. + * + * Note that this function is synchronous with respect to the host only if + * the source or destination of the transfer is host memory. Note also + * that this copy is serialized with respect to all pending and future + * asynchronous work in to the current device, the copy's source device, + * and the copy's destination device (use ::cudaMemcpy3DPeerAsync to avoid + * this synchronization). 
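+ *
+ * A minimal sketch, assuming srcPitched and dstPitched were allocated with
+ * ::cudaMalloc3D on devices 0 and 1 respectively (width, height and depth are
+ * illustrative, and error checking is omitted):
+\code
+cudaMemcpy3DPeerParms pp = {0};
+pp.srcDevice = 0;
+pp.srcPtr    = srcPitched;
+pp.dstDevice = 1;
+pp.dstPtr    = dstPitched;
+pp.extent    = make_cudaExtent(width * sizeof(float), height, depth);
+cudaMemcpy3DPeer(&pp);
+\endcode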
+ * + * \param p - Parameters for the memory copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidPitchValue + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpy3DPeer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeer(const struct cudaMemcpy3DPeerParms *p); + +/** + * \brief Copies data between 3D objects + * +\code +struct cudaExtent { + size_t width; + size_t height; + size_t depth; +}; +struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d); + +struct cudaPos { + size_t x; + size_t y; + size_t z; +}; +struct cudaPos make_cudaPos(size_t x, size_t y, size_t z); + +struct cudaMemcpy3DParms { + cudaArray_t srcArray; + struct cudaPos srcPos; + struct cudaPitchedPtr srcPtr; + cudaArray_t dstArray; + struct cudaPos dstPos; + struct cudaPitchedPtr dstPtr; + struct cudaExtent extent; + enum cudaMemcpyKind kind; +}; +\endcode + * + * ::cudaMemcpy3DAsync() copies data betwen two 3D objects. The source and + * destination objects may be in either host memory, device memory, or a CUDA + * array. The source, destination, extent, and kind of copy performed is + * specified by the ::cudaMemcpy3DParms struct which should be initialized to + * zero before use: +\code +cudaMemcpy3DParms myParms = {0}; +\endcode + * + * The struct passed to ::cudaMemcpy3DAsync() must specify one of \p srcArray + * or \p srcPtr and one of \p dstArray or \p dstPtr. Passing more than one + * non-zero source or destination will cause ::cudaMemcpy3DAsync() to return an + * error. + * + * The \p srcPos and \p dstPos fields are optional offsets into the source and + * destination objects and are defined in units of each object's elements. The + * element for a host or device pointer is assumed to be unsigned char. + * For CUDA arrays, positions must be in the range [0, 2048) for any + * dimension. + * + * The \p extent field defines the dimensions of the transferred area in + * elements. If a CUDA array is participating in the copy, the extent is + * defined in terms of that array's elements. If no CUDA array is + * participating in the copy then the extents are defined in elements of + * unsigned char. + * + * The \p kind field defines the direction of the copy. It must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * For ::cudaMemcpyHostToHost or ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost + * passed as kind and cudaArray type passed as source or destination, if the kind + * implies cudaArray type to be present on the host, ::cudaMemcpy3DAsync() will + * disregard that implication and silently correct the kind based on the fact that + * cudaArray type can only be present on the device. + * + * If the source and destination are both arrays, ::cudaMemcpy3DAsync() will + * return an error if they do not have the same element size. + * + * The source and destination object may not overlap. If overlapping source + * and destination objects are specified, undefined behavior will result. 
+ *
+ * The source object must lie entirely within the region defined by \p srcPos
+ * and \p extent. The destination object must lie entirely within the region
+ * defined by \p dstPos and \p extent.
+ *
+ * ::cudaMemcpy3DAsync() returns an error if the pitch of \p srcPtr or
+ * \p dstPtr exceeds the maximum allowed. The pitch of a
+ * ::cudaPitchedPtr allocated with ::cudaMalloc3D() will always be valid.
+ *
+ * ::cudaMemcpy3DAsync() is asynchronous with respect to the host, so
+ * the call may return before the copy is complete. The copy can optionally
+ * be associated to a stream by passing a non-zero \p stream argument. If
+ * \p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream
+ * is non-zero, the copy may overlap with operations in other streams.
+ *
+ * The device version of this function only handles device to device copies and
+ * cannot be given local or shared pointers.
+ *
+ * \param p - 3D memory copy parameters
+ * \param stream - Stream identifier
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidPitchValue,
+ * ::cudaErrorInvalidMemcpyDirection
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaMemset3D, ::cudaMemcpy3D,
+ * ::cudaMemcpy, ::cudaMemcpy2D,
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
+ * ::cudaMemcpy2DToArrayAsync,
+ * ::cudaMemcpy2DFromArrayAsync,
+ * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,
+ * ::make_cudaExtent, ::make_cudaPos,
+ * ::cuMemcpy3DAsync
+ */
+extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream __dv(0));
+
+/**
+ * \brief Copies memory between devices asynchronously.
+ *
+ * Perform a 3D memory copy according to the parameters specified in
+ * \p p. See the definition of the ::cudaMemcpy3DPeerParms structure
+ * for documentation of its parameters.
+ *
+ * \param p - Parameters for the memory copy
+ * \param stream - Stream identifier
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidDevice,
+ * ::cudaErrorInvalidPitchValue
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync,
+ * ::cudaMemcpy3DPeerAsync,
+ * ::cuMemcpy3DPeerAsync
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeerAsync(const struct cudaMemcpy3DPeerParms *p, cudaStream_t stream __dv(0));
+
+/**
+ * \brief Gets free and total device memory
+ *
+ * Returns in \p *total the total amount of memory available to the current context.
+ * Returns in \p *free the amount of memory on the device that is free according to the OS.
+ * CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free.
+ * In a multi-tenant situation, the returned free estimate is subject to a race condition:
+ * an allocation or free performed by a different process, or by a different thread in the
+ * same process, between the time free memory is estimated and reported will cause the
+ * reported free value to deviate from the memory that is actually free.
+ *
+ * The integrated GPU on Tegra shares memory with the CPU and other components
+ * of the SoC.
+ * The free and total values returned by the API exclude
+ * the SWAP memory space maintained by the OS on some platforms.
+ * The OS may move some of the memory pages into the swap area as the GPU or
+ * CPU allocates or accesses memory. See the Tegra app note on how to calculate
+ * total and free memory on Tegra.
+ *
+ * \param free - Returned free memory in bytes
+ * \param total - Returned total memory in bytes
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorLaunchFailure
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cuMemGetInfo
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemGetInfo(size_t *free, size_t *total);
+
+/**
+ * \brief Gets info about the specified cudaArray
+ *
+ * Returns in \p *desc, \p *extent and \p *flags respectively, the type, shape
+ * and flags of \p array.
+ *
+ * Any of \p *desc, \p *extent and \p *flags may be specified as NULL.
+ *
+ * \param desc - Returned array type
+ * \param extent - Returned array shape. 2D arrays will have depth of zero
+ * \param flags - Returned array flags
+ * \param array - The ::cudaArray to get info for
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cuArrayGetDescriptor,
+ * ::cuArray3DGetDescriptor
+ */
+extern __host__ cudaError_t CUDARTAPI cudaArrayGetInfo(struct cudaChannelFormatDesc *desc, struct cudaExtent *extent, unsigned int *flags, cudaArray_t array);
+
+/**
+ * \brief Gets a CUDA array plane from a CUDA array
+ *
+ * Returns in \p pPlaneArray a CUDA array that represents a single format plane
+ * of the CUDA array \p hArray.
+ *
+ * If \p planeIdx is greater than the maximum number of planes in this array or if the array does
+ * not have a multi-planar format, e.g. ::cudaChannelFormatKindNV12, then ::cudaErrorInvalidValue is returned.
+ *
+ * Note that if the \p hArray has format ::cudaChannelFormatKindNV12, then passing in 0 for \p planeIdx returns
+ * a CUDA array of the same size as \p hArray but with one 8-bit channel and ::cudaChannelFormatKindUnsigned as its format kind.
+ * If 1 is passed for \p planeIdx, then the returned CUDA array has half the height and width
+ * of \p hArray with two 8-bit channels and ::cudaChannelFormatKindUnsigned as its format kind.
+ *
+ * \param pPlaneArray - Returned CUDA array referenced by the \p planeIdx
+ * \param hArray - CUDA array
+ * \param planeIdx - Plane index
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidResourceHandle
+ * \notefnerr
+ *
+ * \sa
+ * ::cuArrayGetPlane
+ */
+extern __host__ cudaError_t CUDARTAPI cudaArrayGetPlane(cudaArray_t *pPlaneArray, cudaArray_t hArray, unsigned int planeIdx);
+
+/**
+ * \brief Returns the memory requirements of a CUDA array
+ *
+ * Returns the memory requirements of a CUDA array in \p memoryRequirements.
+ * If the CUDA array is not allocated with flag ::cudaArrayDeferredMapping,
+ * ::cudaErrorInvalidValue will be returned.
+ *
+ * The returned value in ::cudaArrayMemoryRequirements::size
+ * represents the total size of the CUDA array.
+ * The returned value in ::cudaArrayMemoryRequirements::alignment
+ * represents the alignment necessary for mapping the CUDA array.
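+ *
+ * For example, for a hypothetical \p array created with the
+ * ::cudaArrayDeferredMapping flag on device 0 (error checking omitted):
+\code
+struct cudaArrayMemoryRequirements memReq;
+cudaArrayGetMemoryRequirements(&memReq, array, 0);
+/* memReq.size and memReq.alignment describe the physical allocation needed to map the array */
+\endcode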
+ * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] memoryRequirements - Pointer to ::cudaArrayMemoryRequirements + * \param[in] array - CUDA array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cudaMipmappedArrayGetMemoryRequirements + */ +extern __host__ cudaError_t CUDARTAPI cudaArrayGetMemoryRequirements(struct cudaArrayMemoryRequirements *memoryRequirements, cudaArray_t array, int device); + +/** + * \brief Returns the memory requirements of a CUDA mipmapped array + * + * Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements + * If the CUDA mipmapped array is not allocated with flag ::cudaArrayDeferredMapping + * ::cudaErrorInvalidValue will be returned. + * + * The returned value in ::cudaArrayMemoryRequirements::size + * represents the total size of the CUDA mipmapped array. + * The returned value in ::cudaArrayMemoryRequirements::alignment + * represents the alignment necessary for mapping the CUDA mipmapped + * array. + * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] memoryRequirements - Pointer to ::cudaArrayMemoryRequirements + * \param[in] mipmap - CUDA mipmapped array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cudaArrayGetMemoryRequirements + */ +extern __host__ cudaError_t CUDARTAPI cudaMipmappedArrayGetMemoryRequirements(struct cudaArrayMemoryRequirements *memoryRequirements, cudaMipmappedArray_t mipmap, int device); + +/** + * \brief Returns the layout properties of a sparse CUDA array + * + * Returns the layout properties of a sparse CUDA array in \p sparseProperties. + * If the CUDA array is not allocated with flag ::cudaArraySparse + * ::cudaErrorInvalidValue will be returned. + * + * If the returned value in ::cudaArraySparseProperties::flags contains ::cudaArraySparsePropertiesSingleMipTail, + * then ::cudaArraySparseProperties::miptailSize represents the total size of the array. Otherwise, it will be zero. + * Also, the returned value in ::cudaArraySparseProperties::miptailFirstLevel is always zero. + * Note that the \p array must have been allocated using ::cudaMallocArray or ::cudaMalloc3DArray. For CUDA arrays obtained + * using ::cudaMipmappedArrayGetLevel, ::cudaErrorInvalidValue will be returned. Instead, ::cudaMipmappedArrayGetSparseProperties + * must be used to obtain the sparse properties of the entire CUDA mipmapped array to which \p array belongs to. + * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] sparseProperties - Pointer to return the ::cudaArraySparseProperties + * \param[in] array - The CUDA array to get the sparse properties of + * + * \sa + * ::cudaMipmappedArrayGetSparseProperties, + * ::cuMemMapArrayAsync + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaArrayGetSparseProperties(struct cudaArraySparseProperties *sparseProperties, cudaArray_t array); +#endif + +/** + * \brief Returns the layout properties of a sparse CUDA mipmapped array + * + * Returns the sparse array layout properties in \p sparseProperties. + * If the CUDA mipmapped array is not allocated with flag ::cudaArraySparse + * ::cudaErrorInvalidValue will be returned. + * + * For non-layered CUDA mipmapped arrays, ::cudaArraySparseProperties::miptailSize returns the + * size of the mip tail region. 
The mip tail region includes all mip levels whose width, height or depth + * is less than that of the tile. + * For layered CUDA mipmapped arrays, if ::cudaArraySparseProperties::flags contains ::cudaArraySparsePropertiesSingleMipTail, + * then ::cudaArraySparseProperties::miptailSize specifies the size of the mip tail of all layers combined. + * Otherwise, ::cudaArraySparseProperties::miptailSize specifies mip tail size per layer. + * The returned value of ::cudaArraySparseProperties::miptailFirstLevel is valid only if ::cudaArraySparseProperties::miptailSize is non-zero. + * + * \return + * ::cudaSuccess + * ::cudaErrorInvalidValue + * + * \param[out] sparseProperties - Pointer to return ::cudaArraySparseProperties + * \param[in] mipmap - The CUDA mipmapped array to get the sparse properties of + * + * \sa + * ::cudaArrayGetSparseProperties, + * ::cuMemMapArrayAsync + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaMipmappedArrayGetSparseProperties(struct cudaArraySparseProperties *sparseProperties, cudaMipmappedArray_t mipmap); +#endif + +/** + * \brief Copies data between host and device + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * memory area pointed to by \p dst, where \p kind specifies the direction + * of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. Calling + * ::cudaMemcpy() with dst and src pointers that do not match the direction of + * the copy results in an undefined behavior. + * + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_init_rt + * \note_callback + * + * \note_sync + * \note_memcpy + * + * \sa ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyDtoH, + * ::cuMemcpyHtoD, + * ::cuMemcpyDtoD, + * ::cuMemcpy + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind); + +/** + * \brief Copies memory between two devices + * + * Copies memory from one device to memory on another device. \p dst is the + * base device pointer of the destination memory and \p dstDevice is the + * destination device. \p src is the base device pointer of the source memory + * and \p srcDevice is the source device. \p count specifies the number of bytes + * to copy. + * + * Note that this function is asynchronous with respect to the host, but + * serialized with respect all pending and future asynchronous work in to the + * current device, \p srcDevice, and \p dstDevice (use ::cudaMemcpyPeerAsync + * to avoid this synchronization). 
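+ *
+ * A minimal sketch (the device ids and buffer size are illustrative; error
+ * checking is omitted):
+\code
+void *srcBuf, *dstBuf;
+size_t count = 1 << 20;
+cudaSetDevice(0); cudaMalloc(&srcBuf, count);
+cudaSetDevice(1); cudaMalloc(&dstBuf, count);
+cudaMemcpyPeer(dstBuf, 1, srcBuf, 0, count);
+\endcode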
+ * + * \param dst - Destination device pointer + * \param dstDevice - Destination device + * \param src - Source device pointer + * \param srcDevice - Source device + * \param count - Size of memory copy in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpyPeer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyPeer(void *dst, int dstDevice, const void *src, int srcDevice, size_t count); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. \p dpitch and + * \p spitch are the widths in memory in bytes of the 2D arrays pointed to by + * \p dst and \p src, including any padding added to the end of each row. The + * memory areas may not overlap. \p width must not exceed either \p dpitch or + * \p spitch. Calling ::cudaMemcpy2D() with \p dst and \p src pointers that do + * not match the direction of the copy results in an undefined behavior. + * ::cudaMemcpy2D() returns an error if \p dpitch or \p spitch exceeds + * the maximum allowed. + * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the CUDA array \p dst starting at + * \p hOffset rows and \p wOffset bytes from the upper left corner, + * where \p kind specifies the direction of the copy, and must be one + * of ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. 
+ * \p spitch is the width in memory in bytes of the 2D array pointed to by + * \p src, including any padding added to the end of each row. \p wOffset + + * \p width must not exceed the width of the CUDA array \p dst. \p width must + * not exceed \p spitch. ::cudaMemcpy2DToArray() returns an error if \p spitch + * exceeds the maximum allowed. + * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the CUDA + * array \p src starting at \p hOffset rows and \p wOffset bytes from the + * upper left corner to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. \p dpitch is the + * width in memory in bytes of the 2D array pointed to by \p dst, including any + * padding added to the end of each row. \p wOffset + \p width must not exceed + * the width of the CUDA array \p src. \p width must not exceed \p dpitch. + * ::cudaMemcpy2DFromArray() returns an error if \p dpitch exceeds the maximum + * allowed. 
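+ *
+ * For example, assuming hostBuf holds height tightly packed rows of widthBytes
+ * bytes each, so the destination pitch equals widthBytes (names are
+ * illustrative; error checking omitted):
+\code
+cudaMemcpy2DFromArray(hostBuf, widthBytes, cuArray, 0, 0,
+                      widthBytes, height, cudaMemcpyDeviceToHost);
+\endcode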
+ * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArray(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the CUDA + * array \p src starting at \p hOffsetSrc rows and \p wOffsetSrc bytes from the + * upper left corner to the CUDA array \p dst starting at \p hOffsetDst rows + * and \p wOffsetDst bytes from the upper left corner, where \p kind + * specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p wOffsetDst + \p width must not exceed the width of the CUDA array \p dst. + * \p wOffsetSrc + \p width must not exceed the width of the CUDA array \p src. 
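+ *
+ * For example, copying a full widthBytes x height region between two CUDA
+ * arrays of the same format (names are illustrative; error checking omitted):
+\code
+cudaMemcpy2DArrayToArray(dstArray, 0, 0, srcArray, 0, 0,
+                         widthBytes, height, cudaMemcpyDeviceToDevice);
+\endcode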
+ * + * \param dst - Destination memory address + * \param wOffsetDst - Destination starting X offset (columns in bytes) + * \param hOffsetDst - Destination starting Y offset (rows) + * \param src - Source memory address + * \param wOffsetSrc - Source starting X offset (columns in bytes) + * \param hOffsetSrc - Source starting Y offset (rows) + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2D, + * ::cuMemcpy2DUnaligned + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + +/** + * \brief Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area pointed to by \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy, + * ::cuMemcpyHtoD, + * ::cuMemcpyDtoD + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbol(const void *symbol, const void *src, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyHostToDevice)); + +/** + * \brief Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. 
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy, + * ::cuMemcpyDtoH, + * ::cuMemcpyDtoD + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbol(void *dst, const void *symbol, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToHost)); + + +/** + * \brief Copies data between host and device + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * memory area pointed to by \p dst, where \p kind specifies the + * direction of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * The memory areas may not overlap. Calling ::cudaMemcpyAsync() with \p dst and + * \p src pointers that do not match the direction of the copy results in an + * undefined behavior. + * + * ::cudaMemcpyAsync() is asynchronous with respect to the host, so the call + * may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and the \p stream is + * non-zero, the copy may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. 
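+ *
+ * A typical sketch, assuming hostBuf was allocated with ::cudaMallocHost so
+ * that the copy can overlap with work in other streams (names and sizes are
+ * illustrative; error checking omitted):
+\code
+cudaStream_t stream;
+cudaStreamCreate(&stream);
+cudaMemcpyAsync(devBuf, hostBuf, count, cudaMemcpyHostToDevice, stream);
+/* ... enqueue kernels that consume devBuf in the same stream ... */
+cudaStreamSynchronize(stream);
+\endcode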
+ * + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAsync, + * ::cuMemcpyDtoHAsync, + * ::cuMemcpyHtoDAsync, + * ::cuMemcpyDtoDAsync + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies memory between two devices asynchronously. + * + * Copies memory from one device to memory on another device. \p dst is the + * base device pointer of the destination memory and \p dstDevice is the + * destination device. \p src is the base device pointer of the source memory + * and \p srcDevice is the source device. \p count specifies the number of bytes + * to copy. + * + * Note that this function is asynchronous with respect to the host and all work + * on other devices. + * + * \param dst - Destination device pointer + * \param dstDevice - Destination device + * \param src - Source device pointer + * \param srcDevice - Source device + * \param count - Size of memory copy in bytes + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, + * ::cudaMemcpy3DPeerAsync, + * ::cuMemcpyPeerAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice, size_t count, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p dpitch and \p spitch are the widths in memory in bytes of the 2D arrays + * pointed to by \p dst and \p src, including any padding added to the end of + * each row. The memory areas may not overlap. \p width must not exceed either + * \p dpitch or \p spitch. + * + * Calling ::cudaMemcpy2DAsync() with \p dst and \p src pointers that do not + * match the direction of the copy results in an undefined behavior. + * ::cudaMemcpy2DAsync() returns an error if \p dpitch or \p spitch is greater + * than the maximum allowed. + * + * ::cudaMemcpy2DAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. 
The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and + * \p stream is non-zero, the copy may overlap with operations in other + * streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2DAsync + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the memory + * area pointed to by \p src to the CUDA array \p dst starting at \p hOffset + * rows and \p wOffset bytes from the upper left corner, where \p kind specifies + * the direction of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p spitch is the width in memory in bytes of the 2D array pointed to by + * \p src, including any padding added to the end of each row. \p wOffset + + * \p width must not exceed the width of the CUDA array \p dst. \p width must + * not exceed \p spitch. ::cudaMemcpy2DToArrayAsync() returns an error if + * \p spitch exceeds the maximum allowed. + * + * ::cudaMemcpy2DToArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and + * \p stream is non-zero, the copy may overlap with operations in other + * streams. 
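+ *
+ * For example, assuming hostImg holds height tightly packed rows of widthBytes
+ * bytes each and stream was created earlier (names are illustrative; error
+ * checking omitted):
+\code
+cudaMemcpy2DToArrayAsync(cuArray, 0, 0, hostImg, widthBytes,
+                         widthBytes, height, cudaMemcpyHostToDevice, stream);
+\endcode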
+ * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param spitch - Pitch of source memory + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2DAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * Copies a matrix (\p height rows of \p width bytes each) from the CUDA + * array \p src starting at \p hOffset rows and \p wOffset bytes from the + * upper left corner to the memory area pointed to by \p dst, + * where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * \p dpitch is the width in memory in bytes of the 2D + * array pointed to by \p dst, including any padding added to the end of each + * row. \p wOffset + \p width must not exceed the width of the CUDA array + * \p src. \p width must not exceed \p dpitch. ::cudaMemcpy2DFromArrayAsync() + * returns an error if \p dpitch exceeds the maximum allowed. + * + * ::cudaMemcpy2DFromArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream is + * non-zero, the copy may overlap with operations in other streams. 
+ * + * \param dst - Destination memory address + * \param dpitch - Pitch of destination memory + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param width - Width of matrix transfer (columns in bytes) + * \param height - Height of matrix transfer (rows) + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidPitchValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * \note_memcpy + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpy2DAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArrayAsync(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area pointed to by \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy + * may overlap with operations in other streams. 
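+ *
+ * For example, for a hypothetical __constant__ coefficient table and an
+ * existing stream (error checking omitted):
+\code
+__constant__ float filterCoeffs[16];
+
+float hostCoeffs[16] = {0};
+cudaMemcpyToSymbolAsync(filterCoeffs, hostCoeffs, sizeof(hostCoeffs), 0,
+                        cudaMemcpyHostToDevice, stream);
+\endcode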
+ * + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAsync, + * ::cuMemcpyHtoDAsync, + * ::cuMemcpyDtoDAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbolAsync(const void *symbol, const void *src, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that resides in + * global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap + * with operations in other streams. + * + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, + * ::cuMemcpyAsync, + * ::cuMemcpyDtoHAsync, + * ::cuMemcpyDtoDAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbolAsync(void *dst, const void *symbol, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + + +/** + * \brief Initializes or sets device memory to a value + * + * Fills the first \p count bytes of the memory area pointed to by \p devPtr + * with the constant byte value \p value. 
+ * + * Note that this function is asynchronous with respect to the host unless + * \p devPtr refers to pinned host memory. + * + * \param devPtr - Pointer to device memory + * \param value - Value to set for each byte of specified memory + * \param count - Size in bytes to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_init_rt + * \note_callback + * + * \sa + * ::cuMemsetD8, + * ::cuMemsetD16, + * ::cuMemsetD32 + */ +extern __host__ cudaError_t CUDARTAPI cudaMemset(void *devPtr, int value, size_t count); + +/** + * \brief Initializes or sets device memory to a value + * + * Sets to the specified value \p value a matrix (\p height rows of \p width + * bytes each) pointed to by \p dstPtr. \p pitch is the width in bytes of the + * 2D array pointed to by \p dstPtr, including any padding added to the end + * of each row. This function performs fastest when the pitch is one that has + * been passed back by ::cudaMallocPitch(). + * + * Note that this function is asynchronous with respect to the host unless + * \p devPtr refers to pinned host memory. + * + * \param devPtr - Pointer to 2D device memory + * \param pitch - Pitch in bytes of 2D device memory(Unused if \p height is 1) + * \param value - Value to set for each byte of specified memory + * \param width - Width of matrix set (columns in bytes) + * \param height - Height of matrix set (rows) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset3D, ::cudaMemsetAsync, + * ::cudaMemset2DAsync, ::cudaMemset3DAsync, + * ::cuMemsetD2D8, + * ::cuMemsetD2D16, + * ::cuMemsetD2D32 + */ +extern __host__ cudaError_t CUDARTAPI cudaMemset2D(void *devPtr, size_t pitch, int value, size_t width, size_t height); + +/** + * \brief Initializes or sets device memory to a value + * + * Initializes each element of a 3D array to the specified value \p value. + * The object to initialize is defined by \p pitchedDevPtr. The \p pitch field + * of \p pitchedDevPtr is the width in memory in bytes of the 3D array pointed + * to by \p pitchedDevPtr, including any padding added to the end of each row. + * The \p xsize field specifies the logical width of each row in bytes, while + * the \p ysize field specifies the height of each 2D slice in rows. + * The \p pitch field of \p pitchedDevPtr is ignored when \p height and \p depth + * are both equal to 1. + * + * The extents of the initialized region are specified as a \p width in bytes, + * a \p height in rows, and a \p depth in slices. + * + * Extents with \p width greater than or equal to the \p xsize of + * \p pitchedDevPtr may perform significantly faster than extents narrower + * than the \p xsize. Secondarily, extents with \p height equal to the + * \p ysize of \p pitchedDevPtr will perform faster than when the \p height is + * shorter than the \p ysize. + * + * This function performs fastest when the \p pitchedDevPtr has been allocated + * by ::cudaMalloc3D(). + * + * Note that this function is asynchronous with respect to the host unless + * \p pitchedDevPtr refers to pinned host memory. 
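+ *
+ * For example, zero-filling a freshly allocated 3D buffer (the extent values
+ * are illustrative; error checking is omitted):
+\code
+cudaExtent ext = make_cudaExtent(256 * sizeof(float), 256, 64);
+cudaPitchedPtr devPitched;
+cudaMalloc3D(&devPitched, ext);
+cudaMemset3D(devPitched, 0, ext);
+\endcode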
+ * + * \param pitchedDevPtr - Pointer to pitched device memory + * \param value - Value to set for each byte of specified memory + * \param extent - Size parameters for where to set device memory (\p width field in bytes) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, + * ::cudaMemsetAsync, ::cudaMemset2DAsync, ::cudaMemset3DAsync, + * ::cudaMalloc3D, ::make_cudaPitchedPtr, + * ::make_cudaExtent + */ +extern __host__ cudaError_t CUDARTAPI cudaMemset3D(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent); + +/** + * \brief Initializes or sets device memory to a value + * + * Fills the first \p count bytes of the memory area pointed to by \p devPtr + * with the constant byte value \p value. + * + * ::cudaMemsetAsync() is asynchronous with respect to the host, so + * the call may return before the memset is complete. The operation can optionally + * be associated to a stream by passing a non-zero \p stream argument. + * If \p stream is non-zero, the operation may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param devPtr - Pointer to device memory + * \param value - Value to set for each byte of specified memory + * \param count - Size in bytes to set + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D, + * ::cudaMemset2DAsync, ::cudaMemset3DAsync, + * ::cuMemsetD8Async, + * ::cuMemsetD16Async, + * ::cuMemsetD32Async + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream __dv(0)); + +/** + * \brief Initializes or sets device memory to a value + * + * Sets to the specified value \p value a matrix (\p height rows of \p width + * bytes each) pointed to by \p dstPtr. \p pitch is the width in bytes of the + * 2D array pointed to by \p dstPtr, including any padding added to the end + * of each row. This function performs fastest when the pitch is one that has + * been passed back by ::cudaMallocPitch(). + * + * ::cudaMemset2DAsync() is asynchronous with respect to the host, so + * the call may return before the memset is complete. The operation can optionally + * be associated to a stream by passing a non-zero \p stream argument. + * If \p stream is non-zero, the operation may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. 
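+ *
+ * For example, clearing a pitched 2D allocation in a stream (widthBytes,
+ * height and stream are illustrative; error checking is omitted):
+\code
+void  *devPtr;
+size_t pitch;
+cudaMallocPitch(&devPtr, &pitch, widthBytes, height);
+cudaMemset2DAsync(devPtr, pitch, 0, widthBytes, height, stream);
+\endcode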
+ * + * \param devPtr - Pointer to 2D device memory + * \param pitch - Pitch in bytes of 2D device memory(Unused if \p height is 1) + * \param value - Value to set for each byte of specified memory + * \param width - Width of matrix set (columns in bytes) + * \param height - Height of matrix set (rows) + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D, + * ::cudaMemsetAsync, ::cudaMemset3DAsync, + * ::cuMemsetD2D8Async, + * ::cuMemsetD2D16Async, + * ::cuMemsetD2D32Async + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream __dv(0)); + +/** + * \brief Initializes or sets device memory to a value + * + * Initializes each element of a 3D array to the specified value \p value. + * The object to initialize is defined by \p pitchedDevPtr. The \p pitch field + * of \p pitchedDevPtr is the width in memory in bytes of the 3D array pointed + * to by \p pitchedDevPtr, including any padding added to the end of each row. + * The \p xsize field specifies the logical width of each row in bytes, while + * the \p ysize field specifies the height of each 2D slice in rows. + * The \p pitch field of \p pitchedDevPtr is ignored when \p height and \p depth + * are both equal to 1. + * + * The extents of the initialized region are specified as a \p width in bytes, + * a \p height in rows, and a \p depth in slices. + * + * Extents with \p width greater than or equal to the \p xsize of + * \p pitchedDevPtr may perform significantly faster than extents narrower + * than the \p xsize. Secondarily, extents with \p height equal to the + * \p ysize of \p pitchedDevPtr will perform faster than when the \p height is + * shorter than the \p ysize. + * + * This function performs fastest when the \p pitchedDevPtr has been allocated + * by ::cudaMalloc3D(). + * + * ::cudaMemset3DAsync() is asynchronous with respect to the host, so + * the call may return before the memset is complete. The operation can optionally + * be associated to a stream by passing a non-zero \p stream argument. + * If \p stream is non-zero, the operation may overlap with operations in other streams. + * + * The device version of this function only handles device to device copies and + * cannot be given local or shared pointers. + * + * \param pitchedDevPtr - Pointer to pitched device memory + * \param value - Value to set for each byte of specified memory + * \param extent - Size parameters for where to set device memory (\p width field in bytes) + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_memset + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D, + * ::cudaMemsetAsync, ::cudaMemset2DAsync, + * ::cudaMalloc3D, ::make_cudaPitchedPtr, + * ::make_cudaExtent + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream __dv(0)); + +/** + * \brief Finds the address associated with a CUDA symbol + * + * Returns in \p *devPtr the address of symbol \p symbol on the device. + * \p symbol is a variable that resides in global or constant memory space. 
+ * If \p symbol cannot be found, or if \p symbol is not declared in the + * global or constant memory space, \p *devPtr is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param devPtr - Return device pointer associated with symbol + * \param symbol - Device symbol address + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)", + * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)", + * ::cuModuleGetGlobal + */ +extern __host__ cudaError_t CUDARTAPI cudaGetSymbolAddress(void **devPtr, const void *symbol); + +/** + * \brief Finds the size of the object associated with a CUDA symbol + * + * Returns in \p *size the size of symbol \p symbol. \p symbol is a variable that + * resides in global or constant memory space. If \p symbol cannot be found, or + * if \p symbol is not declared in global or constant memory space, \p *size is + * unchanged and the error ::cudaErrorInvalidSymbol is returned. + * + * \param size - Size of object associated with symbol + * \param symbol - Device symbol address + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)", + * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)", + * ::cuModuleGetGlobal + */ +extern __host__ cudaError_t CUDARTAPI cudaGetSymbolSize(size_t *size, const void *symbol); + +/** + * \brief Prefetches memory to the specified destination device + * + * Prefetches memory to the specified destination device. \p devPtr is the + * base device pointer of the memory to be prefetched and \p dstDevice is the + * destination device. \p count specifies the number of bytes to copy. \p stream + * is the stream in which the operation is enqueued. The memory range must refer + * to managed memory allocated via ::cudaMallocManaged or declared via __managed__ variables. + * + * Passing in cudaCpuDeviceId for \p dstDevice will prefetch the data to host memory. If + * \p dstDevice is a GPU, then the device attribute ::cudaDevAttrConcurrentManagedAccess + * must be non-zero. Additionally, \p stream must be associated with a device that has a + * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * + * The start address and end address of the memory range will be rounded down and rounded up + * respectively to be aligned to CPU page size before the prefetch operation is enqueued + * in the stream. + * + * If no physical memory has been allocated for this region, then this memory region + * will be populated and mapped on the destination device. If there's insufficient + * memory to prefetch the desired region, the Unified Memory driver may evict pages from other + * ::cudaMallocManaged allocations to host memory in order to make room. Device memory + * allocated using ::cudaMalloc or ::cudaMallocArray will not be evicted. + * + * By default, any mappings to the previous location of the migrated pages are removed and + * mappings for the new location are only setup on \p dstDevice. 
The exact behavior however + * also depends on the settings applied to this memory range via ::cudaMemAdvise as described + * below: + * + * If ::cudaMemAdviseSetReadMostly was set on any subset of this memory range, + * then that subset will create a read-only copy of the pages on \p dstDevice. + * + * If ::cudaMemAdviseSetPreferredLocation was called on any subset of this memory + * range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the + * preferred location of any pages in the memory range. + * + * If ::cudaMemAdviseSetAccessedBy was called on any subset of this memory range, + * then mappings to those pages from all the appropriate processors are updated to + * refer to the new location if establishing such a mapping is possible. Otherwise, + * those mappings are cleared. + * + * Note that this API is not required for functionality and only serves to improve performance + * by allowing the application to migrate data to a suitable location before it is accessed. + * Memory accesses to this range are always coherent and are allowed even when the data is + * actively being migrated. + * + * Note that this function is asynchronous with respect to the host and all work + * on other devices. + * + * \param devPtr - Pointer to be prefetched + * \param count - Size in bytes + * \param dstDevice - Destination device to prefetch to + * \param stream - Stream to enqueue prefetch operation + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, + * ::cudaMemcpy3DPeerAsync, ::cudaMemAdvise, + * ::cuMemPrefetchAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPrefetchAsync(const void *devPtr, size_t count, int dstDevice, cudaStream_t stream __dv(0)); + +/** + * \brief Advise about the usage of a given memory range + * + * Advise the Unified Memory subsystem about the usage pattern for the memory range + * starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory + * range will be rounded down and rounded up respectively to be aligned to CPU page size before the + * advice is applied. The memory range must refer to managed memory allocated via ::cudaMallocManaged + * or declared via __managed__ variables. The memory range could also refer to system-allocated pageable + * memory provided it represents a valid, host-accessible region of memory and all additional constraints + * imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable + * memory range results in an error being returned. + * + * The \p advice parameter can take the following values: + * - ::cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read + * from and only occasionally written to. Any read accesses from any processor to this region will create a + * read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cudaMemPrefetchAsync + * is called on this region, it will create a read-only copy of the data on the destination processor. + * If any processor writes to this region, all copies of the corresponding page will be invalidated + * except for the one where the write occurred. The \p device argument is ignored for this advice. 
+ * Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU
+ * that has a non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
+ * Also, if a context is created on a device that does not have the device attribute
+ * ::cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until
+ * all such contexts are destroyed.
+ * If the memory region refers to valid system-allocated pageable memory, then the accessing device must
+ * have a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccess for a read-only
+ * copy to be created on that device. Note however that if the accessing device also has a non-zero value for the
+ * device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice
+ * will not create a read-only copy when that device accesses this memory region.
+ *
+ * - ::cudaMemAdviseUnsetReadMostly: Undoes the effect of ::cudaMemAdviseSetReadMostly and also prevents the
+ * Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated
+ * copies of the data will be collapsed into a single copy. The location for the collapsed
+ * copy will be the preferred location if the page has a preferred location and one of the read-duplicated
+ * copies was resident at that location. Otherwise, the location chosen is arbitrary.
+ *
+ * - ::cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the
+ * data to be the memory belonging to \p device. Passing in cudaCpuDeviceId for \p device sets the
+ * preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the
+ * device attribute ::cudaDevAttrConcurrentManagedAccess. Setting the preferred location
+ * does not cause data to migrate to that location immediately. Instead, it guides the migration policy
+ * when a fault occurs on that memory region. If the data is already in its preferred location and the
+ * faulting processor can establish a mapping without requiring the data to be migrated, then
+ * data migration will be avoided. On the other hand, if the data is not in its preferred location
+ * or if a direct mapping cannot be established, then it will be migrated to the processor accessing
+ * it. It is important to note that setting the preferred location does not prevent data prefetching
+ * done using ::cudaMemPrefetchAsync.
+ * Having a preferred location can override the page thrash detection and resolution logic in the Unified
+ * Memory driver. Normally, if a page is detected to be constantly thrashing between, for example, host and device
+ * memory, the page may eventually be pinned to host memory by the Unified Memory driver. But
+ * if the preferred location is set as device memory, then the page will continue to thrash indefinitely.
+ * If ::cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the
+ * policies associated with that advice will override the policies of this advice, unless read accesses from
+ * \p device will not result in a read-only copy being created on that device as outlined in the description for
+ * the advice ::cudaMemAdviseSetReadMostly.
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
+ * value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \p device has
+ * a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,
+ * then this call has no effect. Note however that this behavior may change in the future.
+ *
+ * - ::cudaMemAdviseUnsetPreferredLocation: Undoes the effect of ::cudaMemAdviseSetPreferredLocation
+ * and changes the preferred location to none.
+ *
+ * - ::cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by \p device.
+ * Passing in ::cudaCpuDeviceId for \p device will set the advice for the CPU. If \p device is a GPU, then
+ * the device attribute ::cudaDevAttrConcurrentManagedAccess must be non-zero.
+ * This advice does not cause data migration and has no impact on the location of the data per se. Instead,
+ * it causes the data to always be mapped in the specified processor's page tables, as long as the
+ * location of the data permits a mapping to be established. If the data gets migrated for any reason,
+ * the mappings are updated accordingly.
+ * This advice is recommended in scenarios where data locality is not important, but avoiding faults is.
+ * Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the
+ * data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data
+ * over to the other GPUs is not as important because the accesses are infrequent and the overhead of
+ * migration may be too high. But preventing faults can still help improve performance, and so having
+ * a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated
+ * to host memory because the CPU typically cannot access device memory directly. Any GPU that had the
+ * ::cudaMemAdviseSetAccessedBy flag set for this data will now have its mapping updated to point to the
+ * page in host memory.
+ * If ::cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the
+ * policies associated with that advice will override the policies of this advice. Additionally, if the
+ * preferred location of this memory region or any subset of it is also \p device, then the policies
+ * associated with ::cudaMemAdviseSetPreferredLocation will override the policies of this advice.
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
+ * value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \p device has
+ * a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,
+ * then this call has no effect.
+ *
+ * - ::cudaMemAdviseUnsetAccessedBy: Undoes the effect of ::cudaMemAdviseSetAccessedBy. Any mappings to
+ * the data from \p device may be removed at any time causing accesses to result in non-fatal page faults.
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
+ * value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \p device has
+ * a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,
+ * then this call has no effect.
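+ *
+ * As a minimal usage sketch that combines several of the advice values above with
+ * ::cudaMemPrefetchAsync (the allocation size, device ordinal and use of the default
+ * stream are assumptions made purely for illustration):
+ * \code
+  int device = 0;
+  size_t count = 1 << 20;
+  char *data;
+  cudaMallocManaged((void **)&data, count, cudaMemAttachGlobal);
+  cudaMemAdvise(data, count, cudaMemAdviseSetReadMostly, device);                 // mostly read, rarely written
+  cudaMemAdvise(data, count, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId); // prefer host residency
+  cudaMemAdvise(data, count, cudaMemAdviseSetAccessedBy, device);                 // keep a device mapping
+  cudaMemPrefetchAsync(data, count, device, 0);                                   // optional migration hint
+ * \endcode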
+ *
+ * \param devPtr - Pointer to memory to set the advice for
+ * \param count - Size in bytes of the memory range
+ * \param advice - Advice to be applied for the specified memory range
+ * \param device - Device to apply the advice for
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorInvalidDevice
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync,
+ * ::cudaMemcpy3DPeerAsync, ::cudaMemPrefetchAsync,
+ * ::cuMemAdvise
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemAdvise(const void *devPtr, size_t count, enum cudaMemoryAdvise advice, int device);
+
+/**
+* \brief Query an attribute of a given memory range
+*
+* Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The
+* memory range must refer to managed memory allocated via ::cudaMallocManaged or declared via
+* __managed__ variables.
+*
+* The \p attribute parameter can take the following values:
+* - ::cudaMemRangeAttributeReadMostly: If this attribute is specified, \p data will be interpreted
+* as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given
+* memory range have read-duplication enabled, or 0 otherwise.
+* - ::cudaMemRangeAttributePreferredLocation: If this attribute is specified, \p data will be
+* interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device
+* id if all pages in the memory range have that GPU as their preferred location, or it will be cudaCpuDeviceId
+* if all pages in the memory range have the CPU as their preferred location, or it will be cudaInvalidDeviceId
+* if either all the pages don't have the same preferred location or some of the pages don't have a
+* preferred location at all. Note that the actual location of the pages in the memory range at the time of
+* the query may be different from the preferred location.
+* - ::cudaMemRangeAttributeAccessedBy: If this attribute is specified, \p data will be interpreted
+* as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned
+* will be a list of device ids that had ::cudaMemAdviseSetAccessedBy set for that entire memory range.
+* If any device does not have that advice set for the entire memory range, that device will not be included.
+* If \p data is larger than the number of devices that have that advice set for that memory range,
+* cudaInvalidDeviceId will be returned in all the extra space provided. For example, if \p dataSize is 12
+* (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be
+* { 0, cudaInvalidDeviceId, cudaInvalidDeviceId }. If \p data is smaller than the number of devices that have
+* that advice set, then only as many devices will be returned as can fit in the array. There is no
+* guarantee on which specific devices will be returned, however.
+* - ::cudaMemRangeAttributeLastPrefetchLocation: If this attribute is specified, \p data will be
+* interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location
+* to which all pages in the memory range were prefetched explicitly via ::cudaMemPrefetchAsync. This will either be
+* a GPU id or cudaCpuDeviceId depending on whether the last location for prefetch was a GPU or the CPU
+* respectively. If any page in the memory range was never explicitly prefetched or if not all pages were
+* prefetched to the same location, cudaInvalidDeviceId will be returned. Note that this simply returns the
+* last location that the application requested to prefetch the memory range to. It gives no indication as to
+* whether the prefetch operation to that location has completed or even begun.
+*
+* \param data - A pointer to a memory location where the result
+* of the attribute query will be written to.
+* \param dataSize - The size of \p data in bytes
+* \param attribute - The attribute to query
+* \param devPtr - Start of the range to query
+* \param count - Size of the range to query
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemRangeGetAttributes, ::cudaMemPrefetchAsync,
+ * ::cudaMemAdvise,
+ * ::cuMemRangeGetAttribute
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemRangeGetAttribute(void *data, size_t dataSize, enum cudaMemRangeAttribute attribute, const void *devPtr, size_t count);
+
+/**
+ * \brief Query attributes of a given memory range.
+ *
+ * Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
+ * memory range must refer to managed memory allocated via ::cudaMallocManaged or declared via
+ * __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes
+ * entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries.
+ * The results of the query will be stored in \p data.
+ *
+ * The list of supported attributes is given below. Please refer to ::cudaMemRangeGetAttribute for
+ * attribute descriptions and restrictions.
+ *
+ * - ::cudaMemRangeAttributeReadMostly
+ * - ::cudaMemRangeAttributePreferredLocation
+ * - ::cudaMemRangeAttributeAccessedBy
+ * - ::cudaMemRangeAttributeLastPrefetchLocation
+ *
+ * \param data - A two-dimensional array containing pointers to memory
+ * locations where the result of each attribute query will be written to.
+ * \param dataSizes - Array containing the sizes of each result
+ * \param attributes - An array of attributes to query
+ * (numAttributes and the number of attributes in this array should match)
+ * \param numAttributes - Number of attributes to query
+ * \param devPtr - Start of the range to query
+ * \param count - Size of the range to query
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa ::cudaMemRangeGetAttribute, ::cudaMemAdvise,
+ * ::cudaMemPrefetchAsync,
+ * ::cuMemRangeGetAttributes
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemRangeGetAttributes(void **data, size_t *dataSizes, enum cudaMemRangeAttribute *attributes, size_t numAttributes, const void *devPtr, size_t count);
+
+/** @} */ /* END CUDART_MEMORY */
+
+/**
+ * \defgroup CUDART_MEMORY_DEPRECATED Memory Management [DEPRECATED]
+ *
+ * ___MANBRIEF___ deprecated memory management functions of the CUDA runtime API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes deprecated memory management functions of the CUDA runtime
+ * application programming interface.
+ *
+ * Some functions have overloaded C++ API template versions documented separately in the
+ * \ref CUDART_HIGHLEVEL "C++ API Routines" module.
+ * + * @{ + */ + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * CUDA array \p dst starting at \p hOffset rows and \p wOffset bytes from + * the upper left corner, where \p kind specifies the direction + * of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyHtoA, + * ::cuMemcpyDtoA + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the CUDA array \p src starting at \p hOffset rows + * and \p wOffset bytes from the upper left corner to the memory area pointed to + * by \p dst, where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. 
+ * + * \param dst - Destination memory address + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_sync + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAtoH, + * ::cuMemcpyAtoD + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyFromArray(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the CUDA array \p src starting at \p hOffsetSrc + * rows and \p wOffsetSrc bytes from the upper left corner to the CUDA array + * \p dst starting at \p hOffsetDst rows and \p wOffsetDst bytes from the upper + * left corner, where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. 
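+ *
+ * A minimal sketch of such an array-to-array copy (the element type, array
+ * dimensions and variable names are assumptions made purely for illustration;
+ * new code would typically use the non-deprecated 2D variant
+ * ::cudaMemcpy2DArrayToArray instead):
+ * \code
+  struct cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
+  cudaArray_t srcArr, dstArr;
+  cudaMallocArray(&srcArr, &desc, 256, 256, 0);
+  cudaMallocArray(&dstArr, &desc, 256, 256, 0);
+  // copy one full row (256 floats) from row 3 of srcArr to row 0 of dstArr
+  cudaMemcpyArrayToArray(dstArr, 0, 0, srcArr, 0, 3, 256 * sizeof(float), cudaMemcpyDeviceToDevice);
+ * \endcode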
+ * + * \param dst - Destination memory address + * \param wOffsetDst - Destination starting X offset (columns in bytes) + * \param hOffsetDst - Destination starting Y offset (rows) + * \param src - Source memory address + * \param wOffsetSrc - Source starting X offset (columns in bytes) + * \param hOffsetSrc - Source starting Y offset (rows) + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAtoA + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the memory area pointed to by \p src to the + * CUDA array \p dst starting at \p hOffset rows and \p wOffset bytes from + * the upper left corner, where \p kind specifies the + * direction of the copy, and must be one of ::cudaMemcpyHostToHost, + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyToArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If \p + * kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream + * is non-zero, the copy may overlap with operations in other streams. 
+ * + * \param dst - Destination memory address + * \param wOffset - Destination starting X offset (columns in bytes) + * \param hOffset - Destination starting Y offset (rows) + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyHtoAAsync, + * ::cuMemcpy2DAsync + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** + * \brief Copies data between host and device + * + * \deprecated + * + * Copies \p count bytes from the CUDA array \p src starting at \p hOffset rows + * and \p wOffset bytes from the upper left corner to the memory area pointed to + * by \p dst, where \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * ::cudaMemcpyFromArrayAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If \p + * kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \p stream + * is non-zero, the copy may overlap with operations in other streams. 
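+ *
+ * A minimal sketch of an asynchronous array-to-host transfer (the array size,
+ * pinned host buffer and stream below are assumptions made purely for
+ * illustration):
+ * \code
+  struct cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
+  cudaArray_t arr;
+  cudaMallocArray(&arr, &desc, 256, 1, 0);      // assume arr is subsequently populated
+  float *hostBuf;
+  cudaMallocHost((void **)&hostBuf, 256 * sizeof(float));   // pinned, so the copy can overlap
+  cudaStream_t stream;
+  cudaStreamCreate(&stream);
+  cudaMemcpyFromArrayAsync(hostBuf, arr, 0, 0, 256 * sizeof(float), cudaMemcpyDeviceToHost, stream);
+  cudaStreamSynchronize(stream);                // hostBuf is valid only after the stream drains
+ * \endcode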
+ * + * \param dst - Destination memory address + * \param src - Source memory address + * \param wOffset - Source starting X offset (columns in bytes) + * \param hOffset - Source starting Y offset (rows) + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidMemcpyDirection + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray, + * ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync, + * ::cuMemcpyAtoHAsync, + * ::cuMemcpy2DAsync + */ +extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaMemcpyFromArrayAsync(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + +/** @} */ /* END CUDART_MEMORY_DEPRECATED */ + +/** + * \defgroup CUDART_MEMORY_POOLS Stream Ordered Memory Allocator + * + * ___MANBRIEF___ Functions for performing allocation and free operations in stream order. + * Functions for controlling the behavior of the underlying allocator. + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * + * @{ + * + * \section CUDART_MEMORY_POOLS_overview overview + * + * The asynchronous allocator allows the user to allocate and free in stream order. + * All asynchronous accesses of the allocation must happen between + * the stream executions of the allocation and the free. If the memory is accessed + * outside of the promised stream order, a use before allocation / use after free error + * will cause undefined behavior. + * + * The allocator is free to reallocate the memory as long as it can guarantee + * that compliant memory accesses will not overlap temporally. + * The allocator may refer to internal stream ordering as well as inter-stream dependencies + * (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. + * The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + * + * \section CUDART_MEMORY_POOLS_support Supported Platforms + * + * Whether or not a device supports the integrated stream ordered memory allocator + * may be queried by calling ::cudaDeviceGetAttribute() with the device attribute + * ::cudaDevAttrMemoryPoolsSupported. + */ + +/** + * \brief Allocates memory with stream ordered semantics + * + * Inserts an allocation operation into \p hStream. + * A pointer to the allocated memory is returned immediately in *dptr. + * The allocation must not be accessed until the the allocation operation completes. + * The allocation comes from the memory pool associated with the stream's device. + * + * \note The default memory pool of a device contains device memory from that device. + * \note Basic stream ordering allows future work submitted into the same stream to use the allocation. + * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + * operation completes before work submitted in a separate stream runs. + * \note During stream capture, this function results in the creation of an allocation node. 
In this case, + * the allocation is owned by the graph instead of the memory pool. The memory pool's properties + * are used to set the node's creation parameters. + * + * \param[out] devPtr - Returned device pointer + * \param[in] size - Number of bytes to allocate + * \param[in] hStream - The stream establishing the stream ordering contract and the memory pool to allocate from + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported, + * ::cudaErrorOutOfMemory, + * \notefnerr + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cuMemAllocAsync, + * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream) "cudaMallocAsync (C++ API)", + * ::cudaMallocFromPoolAsync, ::cudaFreeAsync, ::cudaDeviceSetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolSetAccess, ::cudaMemPoolSetAttribute, ::cudaMemPoolGetAttribute + */ +extern __host__ cudaError_t CUDARTAPI cudaMallocAsync(void **devPtr, size_t size, cudaStream_t hStream); + +/** + * \brief Frees memory with stream ordered semantics + * + * Inserts a free operation into \p hStream. + * The allocation must not be accessed after stream execution reaches the free. + * After this API returns, accessing the memory from any subsequent work launched on the GPU + * or querying its pointer attributes results in undefined behavior. + * + * \note During stream capture, this function results in the creation of a free node and + * must therefore be passed the address of a graph allocation. + * + * \param dptr - memory to free + * \param hStream - The stream establishing the stream ordering promise + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * \notefnerr + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \sa ::cuMemFreeAsync, ::cudaMallocAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaFreeAsync(void *devPtr, cudaStream_t hStream); + +/** + * \brief Tries to release memory back to the OS + * + * Releases memory back to the OS until the pool contains fewer than minBytesToKeep + * reserved bytes, or there is no more memory that the allocator can safely release. + * The allocator cannot release OS allocations that back outstanding asynchronous allocations. + * The OS allocations may happen at different granularity from the user allocations. + * + * \note: Allocations that have not been freed count as outstanding. + * \note: Allocations that have been asynchronously freed but whose completion has + * not been observed on the host (eg. by a synchronize) can count as outstanding. + * + * \param[in] pool - The memory pool to trim + * \param[in] minBytesToKeep - If the pool has less than minBytesToKeep reserved, + * the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have + * at least minBytesToKeep bytes reserved after the operation. + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_callback + * + * \sa ::cuMemPoolTrimTo, ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolTrimTo(cudaMemPool_t memPool, size_t minBytesToKeep); + +/** + * \brief Sets attributes of a memory pool + * + * Supported attributes are: + * - ::cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. 
When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::cudaMemPoolReuseFollowEventDependencies: (value type = int) + * Allow ::cudaMallocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::cudaMemPoolReuseAllowOpportunistic: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + * - ::cudaMemPoolReuseAllowInternalDependencies: (value type = int) + * Allow ::cudaMallocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cudaFreeAsync (default enabled). + * - ::cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of backing memory that was + * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + * - ::cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of used memory that was + * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + * + * \param[in] pool - The memory pool to modify + * \param[in] attr - The attribute to modify + * \param[in] value - Pointer to the value to assign + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_callback + * + * \sa ::cuMemPoolSetAttribute, ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate + + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolSetAttribute(cudaMemPool_t memPool, enum cudaMemPoolAttr attr, void *value ); + +/** + * \brief Gets attributes of a memory pool + * + * Supported attributes are: + * - ::cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::cudaMemPoolReuseFollowEventDependencies: (value type = int) + * Allow ::cudaMallocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::cudaMemPoolReuseAllowOpportunistic: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + * - ::cudaMemPoolReuseAllowInternalDependencies: (value type = int) + * Allow ::cudaMallocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cudaFreeAsync (default enabled). + * - ::cudaMemPoolAttrReservedMemCurrent: (value type = cuuint64_t) + * Amount of backing memory currently allocated for the mempool. 
+ * - ::cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) + * High watermark of backing memory allocated for the mempool since + * the last time it was reset. + * - ::cudaMemPoolAttrUsedMemCurrent: (value type = cuuint64_t) + * Amount of memory from the pool that is currently in use by the application. + * - ::cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) + * High watermark of the amount of memory from the pool that was in use by the + * application since the last time it was reset. + * + * \param[in] pool - The memory pool to get attributes of + * \param[in] attr - The attribute to get + * \param[in] value - Retrieved value + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_callback + * + * \sa ::cuMemPoolGetAttribute, ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate + + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolGetAttribute(cudaMemPool_t memPool, enum cudaMemPoolAttr attr, void *value ); + +/** + * \brief Controls visibility of pools between devices + * + * \param[in] pool - The pool being modified + * \param[in] map - Array of access descriptors. Each descriptor instructs the access to enable for a single gpu + * \param[in] count - Number of descriptors in the map array. + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa ::cuMemPoolSetAccess, ::cudaMemPoolGetAccess, ::cudaMallocAsync, cudaFreeAsync + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolSetAccess(cudaMemPool_t memPool, const struct cudaMemAccessDesc *descList, size_t count); + +/** + * \brief Returns the accessibility of a pool from a device + * + * Returns the accessibility of the pool's memory from the specified location. + * + * \param[out] flags - the accessibility of the pool from the specified location + * \param[in] memPool - the pool being queried + * \param[in] location - the location accessing the pool + * + * \sa ::cuMemPoolGetAccess, ::cudaMemPoolSetAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolGetAccess(enum cudaMemAccessFlags *flags, cudaMemPool_t memPool, struct cudaMemLocation *location); + +/** + * \brief Creates a memory pool + * + * Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines + * the properties of the pool such as the backing device and IPC capabilities. + * + * By default, the pool's memory will be accessible from the device it is allocated on. + * + * \note Specifying cudaMemHandleTypeNone creates a memory pool that will not support IPC. + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * + * \sa ::cuMemPoolCreate, ::cudaDeviceSetMemPool, ::cudaMallocFromPoolAsync, ::cudaMemPoolExportToShareableHandle, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool + + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolCreate(cudaMemPool_t *memPool, const struct cudaMemPoolProps *poolProps); + +/** + * \brief Destroys the specified memory pool + * + * If any pointers obtained from this pool haven't been freed or + * the pool has free operations that haven't completed + * when ::cudaMemPoolDestroy is invoked, the function will return immediately and the + * resources associated with the pool will be released automatically + * once there are no more outstanding allocations. + * + * Destroying the current mempool of a device sets the default mempool of + * that device as the current mempool for that device. + * + * \note A device's default memory pool cannot be destroyed. 
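+ *
+ * A minimal end-to-end sketch of an explicitly created pool (the device
+ * ordinal, sizes and release threshold are assumptions made purely for
+ * illustration):
+ * \code
+  struct cudaMemPoolProps props = {};
+  props.allocType = cudaMemAllocationTypePinned;
+  props.handleTypes = cudaMemHandleTypeNone;            // no IPC support requested
+  props.location.type = cudaMemLocationTypeDevice;
+  props.location.id = 0;                                // back the pool with device 0
+  cudaMemPool_t pool;
+  cudaMemPoolCreate(&pool, &props);
+  unsigned long long threshold = 64ULL << 20;           // cuuint64_t value: cache up to 64 MiB
+  cudaMemPoolSetAttribute(pool, cudaMemPoolAttrReleaseThreshold, &threshold);
+  cudaStream_t stream;
+  cudaStreamCreate(&stream);
+  void *p;
+  cudaMallocFromPoolAsync(&p, 1 << 20, pool, stream);
+  cudaFreeAsync(p, stream);
+  cudaStreamSynchronize(stream);                        // no outstanding allocations remain
+  cudaMemPoolDestroy(pool);
+ * \endcode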
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ *
+ * \sa ::cuMemPoolDestroy, ::cudaFreeAsync, ::cudaDeviceSetMemPool, ::cudaDeviceGetDefaultMemPool, ::cudaDeviceGetMemPool, ::cudaMemPoolCreate
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemPoolDestroy(cudaMemPool_t memPool);
+
+/**
+ * \brief Allocates memory from a specified pool with stream ordered semantics.
+ *
+ * Inserts an allocation operation into \p stream.
+ * A pointer to the allocated memory is returned immediately in *ptr.
+ * The allocation must not be accessed until the allocation operation completes.
+ * The allocation comes from the specified memory pool.
+ *
+ * \note
+ * - The specified memory pool may be from a device different than that of the specified \p stream.
+ *
+ * - Basic stream ordering allows future work submitted into the same stream to use the allocation.
+ * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation
+ * operation completes before work submitted in a separate stream runs.
+ *
+ * \note During stream capture, this function results in the creation of an allocation node. In this case,
+ * the allocation is owned by the graph instead of the memory pool. The memory pool's properties
+ * are used to set the node's creation parameters.
+ *
+ * \param[out] ptr - Returned device pointer
+ * \param[in] size - Number of bytes to allocate
+ * \param[in] memPool - The pool to allocate from
+ * \param[in] stream - The stream establishing the stream ordering semantic
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorNotSupported,
+ * ::cudaErrorOutOfMemory
+ *
+ * \sa ::cuMemAllocFromPoolAsync,
+ * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream) "cudaMallocAsync (C++ API)",
+ * ::cudaMallocAsync, ::cudaFreeAsync, ::cudaDeviceGetDefaultMemPool, ::cudaMemPoolCreate, ::cudaMemPoolSetAccess, ::cudaMemPoolSetAttribute
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMallocFromPoolAsync(void **ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream);
+
+/**
+ * \brief Exports a memory pool to the requested handle type.
+ *
+ * Given an IPC capable mempool, create an OS handle to share the pool with another process.
+ * A recipient process can convert the shareable handle into a mempool with ::cudaMemPoolImportFromShareableHandle.
+ * Individual pointers can then be shared with the ::cudaMemPoolExportPointer and ::cudaMemPoolImportPointer APIs.
+ * The implementation of what the shareable handle is and how it can be transferred is defined by the requested
+ * handle type.
+ *
+ * \note: To create an IPC capable mempool, create a mempool with a ::cudaMemAllocationHandleType other than cudaMemHandleTypeNone.
+ *
+ * \param[out] shareableHandle - pointer to the location in which to store the requested handle
+ * \param[in] memPool - pool to export
+ * \param[in] handleType - the type of handle to create
+ * \param[in] flags - must be 0
+ *
+ * \returns
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorOutOfMemory
+ *
+ * \sa ::cuMemPoolExportToShareableHandle, ::cudaMemPoolImportFromShareableHandle, ::cudaMemPoolExportPointer, ::cudaMemPoolImportPointer
+ */
+extern __host__ cudaError_t CUDARTAPI cudaMemPoolExportToShareableHandle(
+    void *shareableHandle,
+    cudaMemPool_t memPool,
+    enum cudaMemAllocationHandleType handleType,
+    unsigned int flags);
+
+/**
+ * \brief Imports a memory pool from a shared handle.
+ * + * Specific allocations can be imported from the imported pool with ::cudaMemPoolImportPointer. + * + * \note Imported memory pools do not support creating new allocations. + * As such imported memory pools may not be used in ::cudaDeviceSetMemPool + * or ::cudaMallocFromPoolAsync calls. + * + * \param[out] pool_out - Returned memory pool + * \param[in] handle - OS handle of the pool to open + * \param[in] handleType - The type of handle being imported + * \param[in] flags - must be 0 + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * + * \sa ::cuMemPoolImportFromShareableHandle, ::cudaMemPoolExportToShareableHandle, ::cudaMemPoolExportPointer, ::cudaMemPoolImportPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolImportFromShareableHandle( + cudaMemPool_t *memPool, + void *shareableHandle, + enum cudaMemAllocationHandleType handleType, + unsigned int flags); + +/** + * \brief Export data to share a memory pool allocation between processes. + * + * Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool. + * The recipient process can import the allocation with the ::cudaMemPoolImportPointer api. + * The data is not a handle and may be shared through any IPC mechanism. + * + * \param[out] shareData_out - Returned export data + * \param[in] ptr - pointer to memory being exported + * + * \returns + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * + * \sa ::cuMemPoolExportPointer, ::cudaMemPoolExportToShareableHandle, ::cudaMemPoolImportFromShareableHandle, ::cudaMemPoolImportPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolExportPointer(struct cudaMemPoolPtrExportData *exportData, void *ptr); + +/** + * \brief Import a memory pool allocation from another process. + * + * Returns in \p ptr_out a pointer to the imported memory. + * The imported memory must not be accessed before the allocation operation completes + * in the exporting process. The imported memory must be freed from all importing processes before + * being freed in the exporting process. The pointer may be freed with cudaFree + * or cudaFreeAsync. If ::cudaFreeAsync is used, the free must be completed + * on the importing process before the free operation on the exporting process. + * + * \note The ::cudaFreeAsync api may be used in the exporting process before + * the ::cudaFreeAsync operation completes in its stream as long as the + * ::cudaFreeAsync in the exporting process specifies a stream with + * a stream dependency on the importing process's ::cudaFreeAsync. + * + * \param[out] ptr_out - pointer to imported memory + * \param[in] pool - pool from which to import + * \param[in] shareData - data specifying the memory to import + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolImportPointer, ::cudaMemPoolExportToShareableHandle, ::cudaMemPoolImportFromShareableHandle, ::cudaMemPoolExportPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaMemPoolImportPointer(void **ptr, cudaMemPool_t memPool, struct cudaMemPoolPtrExportData *exportData); + +/** @} */ /* END CUDART_MEMORY_POOLS */ + +/** + * \defgroup CUDART_UNIFIED Unified Addressing + * + * ___MANBRIEF___ unified addressing functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the unified addressing functions of the CUDA + * runtime application programming interface. 
+ * + * @{ + * + * \section CUDART_UNIFIED_overview Overview + * + * CUDA devices can share a unified address space with the host. + * For these devices there is no distinction between a device + * pointer and a host pointer -- the same pointer value may be + * used to access memory from the host program and from a kernel + * running on the device (with exceptions enumerated below). + * + * \section CUDART_UNIFIED_support Supported Platforms + * + * Whether or not a device supports unified addressing may be + * queried by calling ::cudaGetDeviceProperties() with the device + * property ::cudaDeviceProp::unifiedAddressing. + * + * Unified addressing is automatically enabled in 64-bit processes . + * + * \section CUDART_UNIFIED_lookup Looking Up Information from Pointer Values + * + * It is possible to look up information about the memory which backs a + * pointer value. For instance, one may want to know if a pointer points + * to host or device memory. As another example, in the case of device + * memory, one may want to know on which CUDA device the memory + * resides. These properties may be queried using the function + * ::cudaPointerGetAttributes() + * + * Since pointers are unique, it is not necessary to specify information + * about the pointers specified to ::cudaMemcpy() and other copy functions. + * The copy direction ::cudaMemcpyDefault may be used to specify that the + * CUDA runtime should infer the location of the pointer from its value. + * + * \section CUDART_UNIFIED_automaphost Automatic Mapping of Host Allocated Host Memory + * + * All host memory allocated through all devices using ::cudaMallocHost() and + * ::cudaHostAlloc() is always directly accessible from all devices that + * support unified addressing. This is the case regardless of whether or + * not the flags ::cudaHostAllocPortable and ::cudaHostAllocMapped are + * specified. + * + * The pointer value through which allocated host memory may be accessed + * in kernels on all devices that support unified addressing is the same + * as the pointer value through which that memory is accessed on the host. + * It is not necessary to call ::cudaHostGetDevicePointer() to get the device + * pointer for these allocations. + * + * Note that this is not the case for memory allocated using the flag + * ::cudaHostAllocWriteCombined, as discussed below. + * + * \section CUDART_UNIFIED_autopeerregister Direct Access of Peer Memory + + * Upon enabling direct access from a device that supports unified addressing + * to another peer device that supports unified addressing using + * ::cudaDeviceEnablePeerAccess() all memory allocated in the peer device using + * ::cudaMalloc() and ::cudaMallocPitch() will immediately be accessible + * by the current device. The device pointer value through + * which any peer's memory may be accessed in the current device + * is the same pointer value through which that memory may be + * accessed from the peer device. + * + * \section CUDART_UNIFIED_exceptions Exceptions, Disjoint Addressing + * + * Not all memory may be accessed on devices through the same pointer + * value through which they are accessed on the host. These exceptions + * are host memory registered using ::cudaHostRegister() and host memory + * allocated using the flag ::cudaHostAllocWriteCombined. For these + * exceptions, there exists a distinct host and device address for the + * memory. 
The device address is guaranteed to not overlap any valid host + * pointer range and is guaranteed to have the same value across all devices + * that support unified addressing. + * + * This device address may be queried using ::cudaHostGetDevicePointer() + * when a device using unified addressing is current. Either the host + * or the unified device pointer value may be used to refer to this memory + * in ::cudaMemcpy() and similar functions using the ::cudaMemcpyDefault + * memory direction. + * + */ + +/** + * \brief Returns attributes about a specified pointer + * + * Returns in \p *attributes the attributes of the pointer \p ptr. + * If pointer was not allocated in, mapped by or registered with context + * supporting unified addressing ::cudaErrorInvalidValue is returned. + * + * \note In CUDA 11.0 forward passing host pointer will return ::cudaMemoryTypeUnregistered + * in ::cudaPointerAttributes::type and call will return ::cudaSuccess. + * + * The ::cudaPointerAttributes structure is defined as: + * \code + struct cudaPointerAttributes { + enum cudaMemoryType type; + int device; + void *devicePointer; + void *hostPointer; + } + \endcode + * In this structure, the individual fields mean + * + * - \ref ::cudaPointerAttributes::type identifies type of memory. It can be + * ::cudaMemoryTypeUnregistered for unregistered host memory, + * ::cudaMemoryTypeHost for registered host memory, ::cudaMemoryTypeDevice for device + * memory or ::cudaMemoryTypeManaged for managed memory. + * + * - \ref ::cudaPointerAttributes::device "device" is the device against which + * \p ptr was allocated. If \p ptr has memory type ::cudaMemoryTypeDevice + * then this identifies the device on which the memory referred to by \p ptr + * physically resides. If \p ptr has memory type ::cudaMemoryTypeHost then this + * identifies the device which was current when the allocation was made + * (and if that device is deinitialized then this allocation will vanish + * with that device's state). + * + * - \ref ::cudaPointerAttributes::devicePointer "devicePointer" is + * the device pointer alias through which the memory referred to by \p ptr + * may be accessed on the current device. + * If the memory referred to by \p ptr cannot be accessed directly by the + * current device then this is NULL. + * + * - \ref ::cudaPointerAttributes::hostPointer "hostPointer" is + * the host pointer alias through which the memory referred to by \p ptr + * may be accessed on the host. + * If the memory referred to by \p ptr cannot be accessed directly by the + * host then this is NULL. + * + * \param attributes - Attributes for the specified pointer + * \param ptr - Pointer to get attributes for + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, + * ::cudaChooseDevice, + * ::cudaInitDevice, + * ::cuPointerGetAttributes + */ +extern __host__ cudaError_t CUDARTAPI cudaPointerGetAttributes(struct cudaPointerAttributes *attributes, const void *ptr); + +/** @} */ /* END CUDART_UNIFIED */ + +/** + * \defgroup CUDART_PEER Peer Device Memory Access + * + * ___MANBRIEF___ peer device memory access functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the peer device memory access functions of the CUDA runtime + * application programming interface. + * + * @{ + */ + +/** + * \brief Queries if a device may directly access a peer device's memory. 
+ * + * Returns in \p *canAccessPeer a value of 1 if device \p device is capable of + * directly accessing memory from \p peerDevice and 0 otherwise. If direct + * access of \p peerDevice from \p device is possible, then access may be + * enabled by calling ::cudaDeviceEnablePeerAccess(). + * + * \param canAccessPeer - Returned access capability + * \param device - Device from which allocations on \p peerDevice are to + * be directly accessed. + * \param peerDevice - Device on which the allocations to be directly accessed + * by \p device reside. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceEnablePeerAccess, + * ::cudaDeviceDisablePeerAccess, + * ::cuDeviceCanAccessPeer + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceCanAccessPeer(int *canAccessPeer, int device, int peerDevice); + +/** + * \brief Enables direct access to memory allocations on a peer device. + * + * On success, all allocations from \p peerDevice will immediately be accessible by + * the current device. They will remain accessible until access is explicitly + * disabled using ::cudaDeviceDisablePeerAccess() or either device is reset using + * ::cudaDeviceReset(). + * + * Note that access granted by this call is unidirectional and that in order to access + * memory on the current device from \p peerDevice, a separate symmetric call + * to ::cudaDeviceEnablePeerAccess() is required. + * + * Note that there are both device-wide and system-wide limitations per system + * configuration, as noted in the CUDA Programming Guide under the section + * "Peer-to-Peer Memory Access". + * + * Returns ::cudaErrorInvalidDevice if ::cudaDeviceCanAccessPeer() indicates + * that the current device cannot directly access memory from \p peerDevice. + * + * Returns ::cudaErrorPeerAccessAlreadyEnabled if direct access of + * \p peerDevice from the current device has already been enabled. + * + * Returns ::cudaErrorInvalidValue if \p flags is not 0. + * + * \param peerDevice - Peer device to enable direct access to from the current device + * \param flags - Reserved for future use and must be set to 0 + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorPeerAccessAlreadyEnabled, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceCanAccessPeer, + * ::cudaDeviceDisablePeerAccess, + * ::cuCtxEnablePeerAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags); + +/** + * \brief Disables direct access to memory allocations on a peer device. + * + * Returns ::cudaErrorPeerAccessNotEnabled if direct access to memory on + * \p peerDevice has not yet been enabled from the current device. 
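+ *
+ * \par Example
+ * A minimal illustrative sketch, assuming a system with at least two devices
+ * (0 and 1), showing the query/enable/disable sequence described above:
+ * \code
+    int canAccess = 0;
+    cudaSetDevice(0);
+    cudaDeviceCanAccessPeer(&canAccess, 0, 1);
+    if (canAccess) {
+        // Device 0 may now dereference pointers returned by cudaMalloc() on device 1.
+        cudaDeviceEnablePeerAccess(1, 0);
+
+        // ... launch kernels on device 0 that read or write device 1 allocations ...
+
+        // Tear down the (unidirectional) mapping when it is no longer needed.
+        cudaDeviceDisablePeerAccess(1);
+    }
+ * \endcode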
+ * + * \param peerDevice - Peer device to disable direct access to + * + * \return + * ::cudaSuccess, + * ::cudaErrorPeerAccessNotEnabled, + * ::cudaErrorInvalidDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaDeviceCanAccessPeer, + * ::cudaDeviceEnablePeerAccess, + * ::cuCtxDisablePeerAccess + */ +extern __host__ cudaError_t CUDARTAPI cudaDeviceDisablePeerAccess(int peerDevice); + +/** @} */ /* END CUDART_PEER */ + +/** \defgroup CUDART_OPENGL OpenGL Interoperability */ + +/** \defgroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_D3D9 Direct3D 9 Interoperability */ + +/** \defgroup CUDART_D3D9_DEPRECATED Direct3D 9 Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_D3D10 Direct3D 10 Interoperability */ + +/** \defgroup CUDART_D3D10_DEPRECATED Direct3D 10 Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_D3D11 Direct3D 11 Interoperability */ + +/** \defgroup CUDART_D3D11_DEPRECATED Direct3D 11 Interoperability [DEPRECATED] */ + +/** \defgroup CUDART_VDPAU VDPAU Interoperability */ + +/** \defgroup CUDART_EGL EGL Interoperability */ + +/** + * \defgroup CUDART_INTEROP Graphics Interoperability + * + * ___MANBRIEF___ graphics interoperability functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graphics interoperability functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Unregisters a graphics resource for access by CUDA + * + * Unregisters the graphics resource \p resource so it is not accessible by + * CUDA unless registered again. + * + * If \p resource is invalid then ::cudaErrorInvalidResourceHandle is + * returned. + * + * \param resource - Resource to unregister + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphicsD3D9RegisterResource, + * ::cudaGraphicsD3D10RegisterResource, + * ::cudaGraphicsD3D11RegisterResource, + * ::cudaGraphicsGLRegisterBuffer, + * ::cudaGraphicsGLRegisterImage, + * ::cuGraphicsUnregisterResource + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsUnregisterResource(cudaGraphicsResource_t resource); + +/** + * \brief Set usage flags for mapping a graphics resource + * + * Set \p flags for mapping the graphics resource \p resource. + * + * Changes to \p flags will take effect the next time \p resource is mapped. + * The \p flags argument may be any of the following: + * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how \p resource will + * be used. It is therefore assumed that CUDA may read from or write to \p resource. + * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to \p resource. + * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies CUDA will not read from \p resource and will + * write over the entire contents of \p resource, so none of the data + * previously stored in \p resource will be preserved. + * + * If \p resource is presently mapped for access by CUDA then ::cudaErrorUnknown is returned. + * If \p flags is not one of the above values then ::cudaErrorInvalidValue is returned. 
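+ *
+ * \par Example
+ * A minimal illustrative sketch, assuming \c resource was previously obtained
+ * from one of the graphics-API registration calls (for example
+ * ::cudaGraphicsGLRegisterBuffer) and is not currently mapped:
+ * \code
+    // Hint that CUDA will only read from the resource during subsequent mappings.
+    cudaGraphicsResourceSetMapFlags(resource, cudaGraphicsMapFlagsReadOnly);
+
+    // The new flags take effect the next time the resource is mapped.
+    cudaGraphicsMapResources(1, &resource, 0);
+    // ... read from the resource in kernels ...
+    cudaGraphicsUnmapResources(1, &resource, 0);
+ * \endcode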
+ * + * \param resource - Registered resource to set flags for + * \param flags - Parameters for resource mapping + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsMapResources, + * ::cuGraphicsResourceSetMapFlags + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceSetMapFlags(cudaGraphicsResource_t resource, unsigned int flags); + +/** + * \brief Map graphics resources for access by CUDA + * + * Maps the \p count graphics resources in \p resources for access by CUDA. + * + * The resources in \p resources may be accessed by CUDA until they + * are unmapped. The graphics API from which \p resources were registered + * should not access any resources while they are mapped by CUDA. If an + * application does so, the results are undefined. + * + * This function provides the synchronization guarantee that any graphics calls + * issued before ::cudaGraphicsMapResources() will complete before any subsequent CUDA + * work issued in \p stream begins. + * + * If \p resources contains any duplicate entries then ::cudaErrorInvalidResourceHandle + * is returned. If any of \p resources are presently mapped for access by + * CUDA then ::cudaErrorUnknown is returned. + * + * \param count - Number of resources to map + * \param resources - Resources to map for CUDA + * \param stream - Stream for synchronization + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsResourceGetMappedPointer, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cudaGraphicsUnmapResources, + * ::cuGraphicsMapResources + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsMapResources(int count, cudaGraphicsResource_t *resources, cudaStream_t stream __dv(0)); + +/** + * \brief Unmap graphics resources. + * + * Unmaps the \p count graphics resources in \p resources. + * + * Once unmapped, the resources in \p resources may not be accessed by CUDA + * until they are mapped again. + * + * This function provides the synchronization guarantee that any CUDA work issued + * in \p stream before ::cudaGraphicsUnmapResources() will complete before any + * subsequently issued graphics work begins. + * + * If \p resources contains any duplicate entries then ::cudaErrorInvalidResourceHandle + * is returned. If any of \p resources are not presently mapped for access by + * CUDA then ::cudaErrorUnknown is returned. + * + * \param count - Number of resources to unmap + * \param resources - Resources to unmap + * \param stream - Stream for synchronization + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsMapResources, + * ::cuGraphicsUnmapResources + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsUnmapResources(int count, cudaGraphicsResource_t *resources, cudaStream_t stream __dv(0)); + +/** + * \brief Get an device pointer through which to access a mapped graphics resource. + * + * Returns in \p *devPtr a pointer through which the mapped graphics resource + * \p resource may be accessed. + * Returns in \p *size the size of the memory in bytes which may be accessed from that pointer. + * The value set in \p devPtr may change every time that \p resource is mapped. 
+ * + * If \p resource is not a buffer then it cannot be accessed via a pointer and + * ::cudaErrorUnknown is returned. + * If \p resource is not mapped then ::cudaErrorUnknown is returned. + * * + * \param devPtr - Returned pointer through which \p resource may be accessed + * \param size - Returned size of the buffer accessible starting at \p *devPtr + * \param resource - Mapped resource to access + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsMapResources, + * ::cudaGraphicsSubResourceGetMappedArray, + * ::cuGraphicsResourceGetMappedPointer + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedPointer(void **devPtr, size_t *size, cudaGraphicsResource_t resource); + +/** + * \brief Get an array through which to access a subresource of a mapped graphics resource. + * + * Returns in \p *array an array through which the subresource of the mapped + * graphics resource \p resource which corresponds to array index \p arrayIndex + * and mipmap level \p mipLevel may be accessed. The value set in \p array may + * change every time that \p resource is mapped. + * + * If \p resource is not a texture then it cannot be accessed via an array and + * ::cudaErrorUnknown is returned. + * If \p arrayIndex is not a valid array index for \p resource then + * ::cudaErrorInvalidValue is returned. + * If \p mipLevel is not a valid mipmap level for \p resource then + * ::cudaErrorInvalidValue is returned. + * If \p resource is not mapped then ::cudaErrorUnknown is returned. + * + * \param array - Returned array through which a subresource of \p resource may be accessed + * \param resource - Mapped resource to access + * \param arrayIndex - Array index for array textures or cubemap face + * index as defined by ::cudaGraphicsCubeFace for + * cubemap textures for the subresource to access + * \param mipLevel - Mipmap level for the subresource to access + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsSubResourceGetMappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsSubResourceGetMappedArray(cudaArray_t *array, cudaGraphicsResource_t resource, unsigned int arrayIndex, unsigned int mipLevel); + +/** + * \brief Get a mipmapped array through which to access a mapped graphics resource. + * + * Returns in \p *mipmappedArray a mipmapped array through which the mapped + * graphics resource \p resource may be accessed. The value set in \p mipmappedArray may + * change every time that \p resource is mapped. + * + * If \p resource is not a texture then it cannot be accessed via an array and + * ::cudaErrorUnknown is returned. + * If \p resource is not mapped then ::cudaErrorUnknown is returned. 
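+ *
+ * \par Example
+ * A minimal illustrative sketch, assuming \c resource is a registered,
+ * mipmapped texture resource supplied by the caller:
+ * \code
+    cudaMipmappedArray_t mipArray = 0;
+
+    cudaGraphicsMapResources(1, &resource, 0);
+    cudaGraphicsResourceGetMappedMipmappedArray(&mipArray, resource);
+
+    // mipArray may now be used, e.g. as the backing resource of a texture object,
+    // until cudaGraphicsUnmapResources() is called.
+
+    cudaGraphicsUnmapResources(1, &resource, 0);
+ * \endcode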
+ * + * \param mipmappedArray - Returned mipmapped array through which \p resource may be accessed + * \param resource - Mapped resource to access + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorUnknown + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphicsResourceGetMappedPointer, + * ::cuGraphicsResourceGetMappedMipmappedArray + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedMipmappedArray(cudaMipmappedArray_t *mipmappedArray, cudaGraphicsResource_t resource); + +/** @} */ /* END CUDART_INTEROP */ + +/** + * \defgroup CUDART_TEXTURE_OBJECT Texture Object Management + * + * ___MANBRIEF___ texture object management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the low level texture object management functions + * of the CUDA runtime application programming interface. The texture + * object API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Get the channel descriptor of an array + * + * Returns in \p *desc the channel descriptor of the CUDA array \p array. + * + * \param desc - Channel format + * \param array - Memory array on device + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) "cudaCreateChannelDesc (C API)", + * ::cudaCreateTextureObject, ::cudaCreateSurfaceObject + */ +extern __host__ cudaError_t CUDARTAPI cudaGetChannelDesc(struct cudaChannelFormatDesc *desc, cudaArray_const_t array); + +/** + * \brief Returns a channel descriptor using the specified format + * + * Returns a channel descriptor with format \p f and number of bits of each + * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is + * defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + * \endcode + * + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat. + * + * \param x - X component + * \param y - Y component + * \param z - Z component + * \param w - W component + * \param f - Channel format + * + * \return + * Channel descriptor with format \p f + * + * \sa \ref ::cudaCreateChannelDesc(void) "cudaCreateChannelDesc (C++ API)", + * ::cudaGetChannelDesc, ::cudaCreateTextureObject, ::cudaCreateSurfaceObject + */ +extern __host__ struct cudaChannelFormatDesc CUDARTAPI cudaCreateChannelDesc(int x, int y, int z, int w, enum cudaChannelFormatKind f); + +/** + * \brief Creates a texture object + * + * Creates a texture object and returns it in \p pTexObject. \p pResDesc describes + * the data to texture from. \p pTexDesc describes how the data should be sampled. + * \p pResViewDesc is an optional argument that specifies an alternate format for + * the data described by \p pResDesc, and also describes the subresource region + * to restrict access to when texturing. \p pResViewDesc can only be specified if + * the type of resource is a CUDA array or a CUDA mipmapped array. + * + * Texture objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a texture object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. 
+ * + * The ::cudaResourceDesc structure is defined as: + * \code + struct cudaResourceDesc { + enum cudaResourceType resType; + + union { + struct { + cudaArray_t array; + } array; + struct { + cudaMipmappedArray_t mipmap; + } mipmap; + struct { + void *devPtr; + struct cudaChannelFormatDesc desc; + size_t sizeInBytes; + } linear; + struct { + void *devPtr; + struct cudaChannelFormatDesc desc; + size_t width; + size_t height; + size_t pitchInBytes; + } pitch2D; + } res; + }; + * \endcode + * where: + * - ::cudaResourceDesc::resType specifies the type of resource to texture from. + * CUresourceType is defined as: + * \code + enum cudaResourceType { + cudaResourceTypeArray = 0x00, + cudaResourceTypeMipmappedArray = 0x01, + cudaResourceTypeLinear = 0x02, + cudaResourceTypePitch2D = 0x03 + }; + * \endcode + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypeArray, ::cudaResourceDesc::res::array::array + * must be set to a valid CUDA array handle. + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypeMipmappedArray, ::cudaResourceDesc::res::mipmap::mipmap + * must be set to a valid CUDA mipmapped array handle and ::cudaTextureDesc::normalizedCoords must be set to true. + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypeLinear, ::cudaResourceDesc::res::linear::devPtr + * must be set to a valid device pointer, that is aligned to ::cudaDeviceProp::textureAlignment. + * ::cudaResourceDesc::res::linear::desc describes the format and the number of components per array element. ::cudaResourceDesc::res::linear::sizeInBytes + * specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed + * ::cudaDeviceProp::maxTexture1DLinear. The number of elements is computed as (sizeInBytes / sizeof(desc)). + * + * \par + * If ::cudaResourceDesc::resType is set to ::cudaResourceTypePitch2D, ::cudaResourceDesc::res::pitch2D::devPtr + * must be set to a valid device pointer, that is aligned to ::cudaDeviceProp::textureAlignment. + * ::cudaResourceDesc::res::pitch2D::desc describes the format and the number of components per array element. ::cudaResourceDesc::res::pitch2D::width + * and ::cudaResourceDesc::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed + * ::cudaDeviceProp::maxTexture2DLinear[0] and ::cudaDeviceProp::maxTexture2DLinear[1] respectively. + * ::cudaResourceDesc::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to + * ::cudaDeviceProp::texturePitchAlignment. Pitch cannot exceed ::cudaDeviceProp::maxTexture2DLinear[2]. + * + * + * The ::cudaTextureDesc struct is defined as + * \code + struct cudaTextureDesc { + enum cudaTextureAddressMode addressMode[3]; + enum cudaTextureFilterMode filterMode; + enum cudaTextureReadMode readMode; + int sRGB; + float borderColor[4]; + int normalizedCoords; + unsigned int maxAnisotropy; + enum cudaTextureFilterMode mipmapFilterMode; + float mipmapLevelBias; + float minMipmapLevelClamp; + float maxMipmapLevelClamp; + int disableTrilinearOptimization; + int seamlessCubemap; + }; + * \endcode + * where + * - ::cudaTextureDesc::addressMode specifies the addressing mode for each dimension of the texture data. 
::cudaTextureAddressMode is defined as:
+ * \code
+        enum cudaTextureAddressMode {
+            cudaAddressModeWrap   = 0,
+            cudaAddressModeClamp  = 1,
+            cudaAddressModeMirror = 2,
+            cudaAddressModeBorder = 3
+        };
+ * \endcode
+ * This is ignored if ::cudaResourceDesc::resType is ::cudaResourceTypeLinear. Also, if ::cudaTextureDesc::normalizedCoords
+ * is set to zero, ::cudaAddressModeWrap and ::cudaAddressModeMirror won't be supported and will be switched to ::cudaAddressModeClamp.
+ *
+ * - ::cudaTextureDesc::filterMode specifies the filtering mode to be used when fetching from the texture. ::cudaTextureFilterMode is defined as:
+ * \code
+        enum cudaTextureFilterMode {
+            cudaFilterModePoint  = 0,
+            cudaFilterModeLinear = 1
+        };
+ * \endcode
+ * This is ignored if ::cudaResourceDesc::resType is ::cudaResourceTypeLinear.
+ *
+ * - ::cudaTextureDesc::readMode specifies whether integer data should be converted to floating point or not. ::cudaTextureReadMode is defined as:
+ * \code
+        enum cudaTextureReadMode {
+            cudaReadModeElementType     = 0,
+            cudaReadModeNormalizedFloat = 1
+        };
+ * \endcode
+ * Note that this applies only to 8-bit and 16-bit integer formats. 32-bit integer formats are not promoted, regardless of
+ * whether or not ::cudaTextureDesc::readMode is set to ::cudaReadModeNormalizedFloat.
+ *
+ * - ::cudaTextureDesc::sRGB specifies whether sRGB to linear conversion should be performed during texture fetch.
+ *
+ * - ::cudaTextureDesc::borderColor specifies the float values of color, where:
+ * ::cudaTextureDesc::borderColor[0] contains the value of 'R',
+ * ::cudaTextureDesc::borderColor[1] contains the value of 'G',
+ * ::cudaTextureDesc::borderColor[2] contains the value of 'B',
+ * ::cudaTextureDesc::borderColor[3] contains the value of 'A'.
+ * Note that applications using integer border color values will need to reinterpret these values as float.
+ * The values are used only when the addressing mode specified by ::cudaTextureDesc::addressMode is ::cudaAddressModeBorder.
+ *
+ * - ::cudaTextureDesc::normalizedCoords specifies whether the texture coordinates will be normalized or not.
+ *
+ * - ::cudaTextureDesc::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be
+ * clamped to the range [1,16].
+ *
+ * - ::cudaTextureDesc::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels.
+ *
+ * - ::cudaTextureDesc::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level.
+ *
+ * - ::cudaTextureDesc::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to.
+ *
+ * - ::cudaTextureDesc::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to.
+ *
+ * - ::cudaTextureDesc::disableTrilinearOptimization specifies whether the trilinear filtering optimizations will be disabled.
+ *
+ * - ::cudaTextureDesc::seamlessCubemap specifies whether seamless cube map filtering is enabled. This flag can only be specified if the
+ * underlying resource is a CUDA array or a CUDA mipmapped array that was created with the flag ::cudaArrayCubemap.
+ * When seamless cube map filtering is enabled, texture address modes specified by ::cudaTextureDesc::addressMode are ignored.
+ * Instead, if the ::cudaTextureDesc::filterMode is set to ::cudaFilterModePoint the address mode ::cudaAddressModeClamp will be applied for all dimensions.
+ * If the ::cudaTextureDesc::filterMode is set to ::cudaFilterModeLinear, seamless cube map filtering will be performed when sampling along the cube face borders.
+ *
+ * The ::cudaResourceViewDesc struct is defined as
+ * \code
+        struct cudaResourceViewDesc {
+            enum cudaResourceViewFormat format;
+            size_t width;
+            size_t height;
+            size_t depth;
+            unsigned int firstMipmapLevel;
+            unsigned int lastMipmapLevel;
+            unsigned int firstLayer;
+            unsigned int lastLayer;
+        };
+ * \endcode
+ * where:
+ * - ::cudaResourceViewDesc::format specifies how the data contained in the CUDA array or CUDA mipmapped array should
+ * be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block
+ * compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a 32-bit unsigned integer format
+ * with 2 or 4 channels, depending on the block compressed format. For example, BC1 and BC4 require the underlying CUDA array to have
+ * a 32-bit unsigned int with 2 channels. The other BC formats require the underlying resource to have the same 32-bit unsigned int
+ * format but with 4 channels.
+ *
+ * - ::cudaResourceViewDesc::width specifies the new width of the texture data. If the resource view format is a block
+ * compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats,
+ * this value has to be equal to that of the original resource.
+ *
+ * - ::cudaResourceViewDesc::height specifies the new height of the texture data. If the resource view format is a block
+ * compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats,
+ * this value has to be equal to that of the original resource.
+ *
+ * - ::cudaResourceViewDesc::depth specifies the new depth of the texture data. This value has to be equal to that of the
+ * original resource.
+ *
+ * - ::cudaResourceViewDesc::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero.
+ * For non-mipmapped resources, this value has to be zero. ::cudaTextureDesc::minMipmapLevelClamp and ::cudaTextureDesc::maxMipmapLevelClamp
+ * will be relative to this value. For example, if firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified,
+ * then the actual minimum mipmap level clamp will be 3.2.
+ *
+ * - ::cudaResourceViewDesc::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value
+ * has to be zero.
+ *
+ * - ::cudaResourceViewDesc::firstLayer specifies the first layer index for layered textures. This will be the new layer zero.
+ * For non-layered resources, this value has to be zero.
+ *
+ * - ::cudaResourceViewDesc::lastLayer specifies the last layer index for layered textures. For non-layered resources,
+ * this value has to be zero.
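+ *
+ * \par Example
+ * A minimal illustrative sketch that creates a texture object over a linear
+ * device buffer of \c N floats (error checking omitted; \c N is assumed to be
+ * defined by the caller):
+ * \code
+    float *devBuf = NULL;
+    cudaMalloc((void**)&devBuf, N * sizeof(float));
+
+    struct cudaResourceDesc resDesc;
+    memset(&resDesc, 0, sizeof(resDesc));
+    resDesc.resType                = cudaResourceTypeLinear;
+    resDesc.res.linear.devPtr      = devBuf;
+    resDesc.res.linear.desc        = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
+    resDesc.res.linear.sizeInBytes = N * sizeof(float);
+
+    struct cudaTextureDesc texDesc;
+    memset(&texDesc, 0, sizeof(texDesc));
+    texDesc.readMode = cudaReadModeElementType;
+
+    cudaTextureObject_t tex = 0;
+    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
+
+    // ... pass 'tex' to kernels that sample it ...
+
+    cudaDestroyTextureObject(tex);
+    cudaFree(devBuf);
+ * \endcode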
+ * + * + * \param pTexObject - Texture object to create + * \param pResDesc - Resource descriptor + * \param pTexDesc - Texture descriptor + * \param pResViewDesc - Resource view descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDestroyTextureObject, + * ::cuTexObjectCreate + */ + +extern __host__ cudaError_t CUDARTAPI cudaCreateTextureObject(cudaTextureObject_t *pTexObject, const struct cudaResourceDesc *pResDesc, const struct cudaTextureDesc *pTexDesc, const struct cudaResourceViewDesc *pResViewDesc); + +/** + * \brief Destroys a texture object + * + * Destroys the texture object specified by \p texObject. + * + * \param texObject - Texture object to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroyTextureObject(cudaTextureObject_t texObject); + +/** + * \brief Returns a texture object's resource descriptor + * + * Returns the resource descriptor for the texture object specified by \p texObject. + * + * \param pResDesc - Resource descriptor + * \param texObject - Texture object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectGetResourceDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetTextureObjectResourceDesc(struct cudaResourceDesc *pResDesc, cudaTextureObject_t texObject); + +/** + * \brief Returns a texture object's texture descriptor + * + * Returns the texture descriptor for the texture object specified by \p texObject. + * + * \param pTexDesc - Texture descriptor + * \param texObject - Texture object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectGetTextureDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetTextureObjectTextureDesc(struct cudaTextureDesc *pTexDesc, cudaTextureObject_t texObject); + +/** + * \brief Returns a texture object's resource view descriptor + * + * Returns the resource view descriptor for the texture object specified by \p texObject. + * If no resource view was specified, ::cudaErrorInvalidValue is returned. + * + * \param pResViewDesc - Resource view descriptor + * \param texObject - Texture object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateTextureObject, + * ::cuTexObjectGetResourceViewDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetTextureObjectResourceViewDesc(struct cudaResourceViewDesc *pResViewDesc, cudaTextureObject_t texObject); + +/** @} */ /* END CUDART_TEXTURE_OBJECT */ + +/** + * \defgroup CUDART_SURFACE_OBJECT Surface Object Management + * + * ___MANBRIEF___ surface object management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the low level texture object management functions + * of the CUDA runtime application programming interface. The surface object + * API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Creates a surface object + * + * Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes + * the data to perform surface load/stores on. 
::cudaResourceDesc::resType must be + * ::cudaResourceTypeArray and ::cudaResourceDesc::res::array::array + * must be set to a valid CUDA array handle. + * + * Surface objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a surface object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * \param pSurfObject - Surface object to create + * \param pResDesc - Resource descriptor + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidChannelDescriptor, + * ::cudaErrorInvalidResourceHandle + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDestroySurfaceObject, + * ::cuSurfObjectCreate + */ + +extern __host__ cudaError_t CUDARTAPI cudaCreateSurfaceObject(cudaSurfaceObject_t *pSurfObject, const struct cudaResourceDesc *pResDesc); + +/** + * \brief Destroys a surface object + * + * Destroys the surface object specified by \p surfObject. + * + * \param surfObject - Surface object to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaCreateSurfaceObject, + * ::cuSurfObjectDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaDestroySurfaceObject(cudaSurfaceObject_t surfObject); + +/** + * \brief Returns a surface object's resource descriptor + * Returns the resource descriptor for the surface object specified by \p surfObject. + * + * \param pResDesc - Resource descriptor + * \param surfObject - Surface object + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaCreateSurfaceObject, + * ::cuSurfObjectGetResourceDesc + */ +extern __host__ cudaError_t CUDARTAPI cudaGetSurfaceObjectResourceDesc(struct cudaResourceDesc *pResDesc, cudaSurfaceObject_t surfObject); + +/** @} */ /* END CUDART_SURFACE_OBJECT */ + +/** + * \defgroup CUDART__VERSION Version Management + * + * @{ + */ + +/** + * \brief Returns the latest version of CUDA supported by the driver + * + * Returns in \p *driverVersion the latest version of CUDA supported by + * the driver. The version is returned as (1000 × major + 10 × minor). + * For example, CUDA 9.2 would be represented by 9020. If no driver is installed, + * then 0 is returned as the driver version. + * + * This function automatically returns ::cudaErrorInvalidValue + * if \p driverVersion is NULL. + * + * \param driverVersion - Returns the CUDA driver version. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaRuntimeGetVersion, + * ::cuDriverGetVersion + */ +extern __host__ cudaError_t CUDARTAPI cudaDriverGetVersion(int *driverVersion); + +/** + * \brief Returns the CUDA Runtime version + * + * Returns in \p *runtimeVersion the version number of the current CUDA + * Runtime instance. The version is returned as + * (1000 × major + 10 × minor). For example, + * CUDA 9.2 would be represented by 9020. + * + * As of CUDA 12.0, this function no longer initializes CUDA. The purpose + * of this API is solely to return a compile-time constant stating the + * CUDA Toolkit version in the above format. + * + * This function automatically returns ::cudaErrorInvalidValue if + * the \p runtimeVersion argument is NULL. + * + * \param runtimeVersion - Returns the CUDA Runtime version. 
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDriverGetVersion, + * ::cuDriverGetVersion + */ +extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion); + +/** @} */ /* END CUDART__VERSION */ + +/** + * \defgroup CUDART_GRAPH Graph Management + * + * ___MANBRIEF___ graph management functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graph management functions of CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Creates a graph + * + * Creates an empty graph, which is returned via \p pGraph. + * + * \param pGraph - Returns newly created graph + * \param flags - Graph creation flags, must be 0 + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphInstantiate, + * ::cudaGraphDestroy, + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphCreate(cudaGraph_t *pGraph, unsigned int flags); + +/** + * \brief Creates a kernel execution node and adds it to a graph + * + * Creates a new kernel execution node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and arguments specified in \p pNodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * The cudaKernelNodeParams structure is defined as: + * + * \code + * struct cudaKernelNodeParams + * { + * void* func; + * dim3 gridDim; + * dim3 blockDim; + * unsigned int sharedMemBytes; + * void **kernelParams; + * void **extra; + * }; + * \endcode + * + * When the graph is launched, the node will invoke kernel \p func on a (\p gridDim.x x + * \p gridDim.y x \p gridDim.z) grid of blocks. Each block contains + * (\p blockDim.x x \p blockDim.y x \p blockDim.z) threads. + * + * \p sharedMem sets the amount of dynamic shared memory that will be + * available to each thread block. + * + * Kernel parameters to \p func can be specified in one of two ways: + * + * 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N + * parameters, then \p kernelParams needs to be an array of N pointers. Each pointer, + * from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual + * parameter will be copied. The number of kernel parameters and their offsets and sizes do not need + * to be specified as that information is retrieved directly from the kernel's image. + * + * 2) Kernel parameters can also be packaged by the application into a single buffer that is passed in + * via \p extra. This places the burden on the application of knowing each kernel + * parameter's size and alignment/padding within the buffer. The \p extra parameter exists + * to allow this function to take additional less commonly used arguments. \p extra specifies + * a list of names of extra settings and their corresponding values. 
Each extra setting name is + * immediately followed by the corresponding value. The list must be terminated with either NULL or + * CU_LAUNCH_PARAM_END. + * + * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + * array; + * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + * value in \p extra will be a pointer to a buffer + * containing all the kernel parameters for launching kernel + * \p func; + * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + * value in \p extra will be a pointer to a size_t + * containing the size of the buffer specified with + * ::CU_LAUNCH_PARAM_BUFFER_POINTER; + * + * The error ::cudaErrorInvalidValue will be returned if kernel parameters are specified with both + * \p kernelParams and \p extra (i.e. both \p kernelParams and + * \p extra are non-NULL). + * + * The \p kernelParams or \p extra array, as well as the argument values it points to, + * are copied during this call. + * + * \note Kernels launched using graphs must not use texture and surface references. Reading or + * writing through any texture or surface reference is undefined behavior. + * This restriction does not apply to texture and surface objects. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pNodeParams - Parameters for the GPU execution node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDeviceFunction + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphKernelNodeGetParams, + * ::cudaGraphKernelNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddKernelNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Returns a kernel node's parameters + * + * Returns the parameters of kernel node \p node in \p pNodeParams. + * The \p kernelParams or \p extra array returned in \p pNodeParams, + * as well as the argument values it points to, are owned by the node. + * This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cudaGraphKernelNodeSetParams to update the + * parameters of this node. + * + * The params will contain either \p kernelParams or \p extra, + * according to which of these was most recently set on the node. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDeviceFunction + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphAddKernelNode, + * ::cudaGraphKernelNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeGetParams(cudaGraphNode_t node, struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Sets a kernel node's parameters + * + * Sets the parameters of kernel node \p node to \p pNodeParams. 
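+ *
+ * \par Example
+ * A minimal illustrative sketch, assuming a kernel \c myKernel(float *data)
+ * and a device pointer \c dData supplied by the caller:
+ * \code
+    cudaGraph_t graph;
+    cudaGraphNode_t node;
+    cudaGraphCreate(&graph, 0);
+
+    void *args[] = { &dData };
+    struct cudaKernelNodeParams np;
+    memset(&np, 0, sizeof(np));
+    np.func           = (void *)myKernel;
+    np.gridDim        = dim3(64, 1, 1);
+    np.blockDim       = dim3(256, 1, 1);
+    np.sharedMemBytes = 0;
+    np.kernelParams   = args;
+    np.extra          = NULL;
+    cudaGraphAddKernelNode(&node, graph, NULL, 0, &np);
+
+    // Later, update the node's launch configuration in place.
+    np.gridDim = dim3(128, 1, 1);
+    cudaGraphKernelNodeSetParams(node, &np);
+ * \endcode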
+ * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle, + * ::cudaErrorMemoryAllocation + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchKernel, + * ::cudaGraphAddKernelNode, + * ::cudaGraphKernelNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeSetParams(cudaGraphNode_t node, const struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Copies attributes from source node to destination node. + * + * Copies attributes from source node \p src to destination node \p dst. + * Both node must have the same context. + * + * \param[out] dst Destination node + * \param[in] src Source node + * For list of attributes see ::cudaKernelNodeAttrID + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidContext + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeCopyAttributes( + cudaGraphNode_t hSrc, + cudaGraphNode_t hDst); + +/** + * \brief Queries node attribute. + * + * Queries attribute \p attr from node \p hNode and stores it in corresponding + * member of \p value_out. + * + * \param[in] hNode + * \param[in] attr + * \param[out] value_out + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeGetAttribute( + cudaGraphNode_t hNode, + cudaKernelNodeAttrID attr, + cudaKernelNodeAttrValue *value_out); + +/** + * \brief Sets node attribute. + * + * Sets attribute \p attr on node \p hNode from corresponding attribute of + * \p value. + * + * \param[out] hNode + * \param[in] attr + * \param[out] value + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * + * \sa + * ::cudaAccessPolicyWindow + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphKernelNodeSetAttribute( + cudaGraphNode_t hNode, + cudaKernelNodeAttrID attr, + const cudaKernelNodeAttrValue *value); + +/** + * \brief Creates a memcpy node and adds it to a graph + * + * Creates a new memcpy node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will perform the memcpy described by \p pCopyParams. + * See ::cudaMemcpy3D() for a description of the structure and its restrictions. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
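+ *
+ * \par Example
+ * A minimal illustrative sketch that expresses a host-to-device copy of
+ * \c numBytes bytes as a 3D copy, assuming \c graph, \c hostSrc and \c devDst
+ * are supplied by the caller:
+ * \code
+    struct cudaMemcpy3DParms copyParams = { 0 };
+    copyParams.srcPtr = make_cudaPitchedPtr(hostSrc, numBytes, numBytes, 1);
+    copyParams.dstPtr = make_cudaPitchedPtr(devDst,  numBytes, numBytes, 1);
+    copyParams.extent = make_cudaExtent(numBytes, 1, 1);
+    copyParams.kind   = cudaMemcpyHostToDevice;
+
+    cudaGraphNode_t copyNode;
+    cudaGraphAddMemcpyNode(&copyNode, graph, NULL, 0, &copyParams);
+ * \endcode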
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pCopyParams - Parameters for the memory copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy3D, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphAddMemcpyNode1D, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaMemcpy3DParms *pCopyParams); + +/** + * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
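+ *
+ * \par Example
+ * A minimal illustrative sketch, assuming \c graph and a host buffer
+ * \c hostSrc of at least 256 floats are supplied by the caller:
+ * \code
+    __device__ float devData[256];
+
+    // ...
+
+    cudaGraphNode_t toSymbolNode;
+    cudaGraphAddMemcpyNodeToSymbol(&toSymbolNode, graph, NULL, 0,
+                                   devData,              // symbol
+                                   hostSrc,              // source
+                                   256 * sizeof(float),  // count
+                                   0,                    // offset
+                                   cudaMemcpyHostToDevice);
+ * \endcode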
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNodeToSymbol( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + const void* symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNodeFromSymbol( + cudaGraphNode_t* pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t* pDependencies, + size_t numDependencies, + void* dst, + const void* symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Creates a 1D memcpy node and adds it to a graph + * + * Creates a new 1D memcpy node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. Launching a + * memcpy node with dst and src pointers that do not match the direction of + * the copy results in an undefined behavior. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
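+ *
+ * \par Example
+ * A minimal illustrative sketch, assuming \c graph, \c devDst, \c hostSrc and
+ * \c numBytes are supplied by the caller:
+ * \code
+    cudaGraphNode_t copyNode;
+    // cudaMemcpyDefault may be used instead on systems with unified virtual addressing.
+    cudaGraphAddMemcpyNode1D(&copyNode, graph, NULL, 0,
+                             devDst, hostSrc, numBytes,
+                             cudaMemcpyHostToDevice);
+ * \endcode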
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParams1D, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemcpyNode1D( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + void* dst, + const void* src, + size_t count, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Returns a memcpy node's parameters + * + * Returns the parameters of memcpy node \p node in \p pNodeParams. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy3D, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeGetParams(cudaGraphNode_t node, struct cudaMemcpy3DParms *pNodeParams); + +/** + * \brief Sets a memcpy node's parameters + * + * Sets the parameters of memcpy node \p node to \p pNodeParams. + * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy3D, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParams1D, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParams(cudaGraphNode_t node, const struct cudaMemcpy3DParms *pNodeParams); + +/** + * \brief Sets a memcpy node's parameters to copy to a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. 
+ * + * \param node - Node to set the parameters for + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParamsToSymbol( + cudaGraphNode_t node, + const void* symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets a memcpy node's parameters to copy from a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParamsFromSymbol( + cudaGraphNode_t node, + void* dst, + const void* symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets a memcpy node's parameters to perform a 1-dimensional copy + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory + * area pointed to by \p src to the memory area pointed to by \p dst, where + * \p kind specifies the direction of the copy, and must be one of + * ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost, + * ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing + * ::cudaMemcpyDefault is recommended, in which case the type of transfer is + * inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. Launching a + * memcpy node with dst and src pointers that do not match the direction of + * the copy results in an undefined behavior. 
+ * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpy, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphMemcpyNodeSetParams1D( + cudaGraphNode_t node, + void* dst, + const void* src, + size_t count, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Creates a memset node and adds it to a graph + * + * Creates a new memset node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * The element size must be 1, 2, or 4 bytes. + * When the graph is launched, the node will perform the memset described by \p pMemsetParams. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pMemsetParams - Parameters for the memory set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidDevice + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemset2D, + * ::cudaGraphMemsetNodeGetParams, + * ::cudaGraphMemsetNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemsetNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaMemsetParams *pMemsetParams); + +/** + * \brief Returns a memset node's parameters + * + * Returns the parameters of memset node \p node in \p pNodeParams. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemset2D, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphMemsetNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemsetNodeGetParams(cudaGraphNode_t node, struct cudaMemsetParams *pNodeParams); + +/** + * \brief Sets a memset node's parameters + * + * Sets the parameters of memset node \p node to \p pNodeParams. 
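+ *
+ * As an illustrative sketch (the device pointer and element count below are placeholders):
+ * \code
+ void zeroFill(cudaGraphNode_t memsetNode, unsigned int *dBuf, size_t numElems)
+ {
+     struct cudaMemsetParams p = {0};
+     p.dst         = dBuf;
+     p.value       = 0;
+     p.elementSize = sizeof(unsigned int);  // must be 1, 2, or 4 bytes
+     p.width       = numElems;              // elements per row
+     p.height      = 1;                     // single row; pitch is not used in this case
+     cudaGraphMemsetNodeSetParams(memsetNode, &p);
+ }
+ * \endcode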
+ * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemset2D, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphMemsetNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphMemsetNodeSetParams(cudaGraphNode_t node, const struct cudaMemsetParams *pNodeParams); + +/** + * \brief Creates a host execution node and adds it to a graph + * + * Creates a new CPU execution node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and arguments specified in \p pNodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will invoke the specified CPU function. + * Host nodes are not supported under MPS with pre-Volta GPUs. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param pNodeParams - Parameters for the host node + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchHostFunc, + * ::cudaGraphHostNodeGetParams, + * ::cudaGraphHostNodeSetParams, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddHostNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Returns a host node's parameters + * + * Returns the parameters of host node \p node in \p pNodeParams. + * + * \param node - Node to get the parameters for + * \param pNodeParams - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchHostFunc, + * ::cudaGraphAddHostNode, + * ::cudaGraphHostNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphHostNodeGetParams(cudaGraphNode_t node, struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Sets a host node's parameters + * + * Sets the parameters of host node \p node to \p nodeParams. + * + * \param node - Node to set the parameters for + * \param pNodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaLaunchHostFunc, + * ::cudaGraphAddHostNode, + * ::cudaGraphHostNodeGetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphHostNodeSetParams(cudaGraphNode_t node, const struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Creates a child graph node and adds it to a graph + * + * Creates a new node which executes an embedded graph, and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. 
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * If \p hGraph contains allocation or free nodes, this call will return an error. + * + * The node executes an embedded child graph. The child graph is cloned in this call. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param childGraph - The graph to clone into this node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphChildGraphNodeGetGraph, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode, + * ::cudaGraphClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddChildGraphNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, cudaGraph_t childGraph); + +/** + * \brief Gets a handle to the embedded graph of a child graph node + * + * Gets a handle to the embedded graph in a child graph node. This call + * does not clone the graph. Changes to the graph will be reflected in + * the node, and the node retains ownership of the graph. + * + * Allocation and free nodes cannot be added to the returned graph. + * Attempting to do so will return an error. + * + * \param node - Node to get the embedded graph for + * \param pGraph - Location to store a handle to the graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphNodeFindInClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphChildGraphNodeGetGraph(cudaGraphNode_t node, cudaGraph_t *pGraph); + +/** + * \brief Creates an empty node and adds it to a graph + * + * Creates a new node which performs no operation, and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * An empty node performs no operation during execution, but can be used for + * transitive ordering. For example, a phased execution graph with 2 groups of n + * nodes with a barrier between them can be represented using an empty node and + * 2*n dependency edges, rather than no empty node and n^2 dependency edges. 
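+ *
+ * As an illustrative sketch of the barrier pattern described above (the node array
+ * \p groupA of n nodes is a placeholder):
+ * \code
+ void addBarrier(cudaGraph_t graph, const cudaGraphNode_t *groupA, size_t n,
+                 cudaGraphNode_t *barrier_out)
+ {
+     // A single empty node that depends on every node in group A.
+     // Each node of group B can then list this one node as its only dependency,
+     // giving 2*n edges instead of n*n direct edges.
+     cudaGraphAddEmptyNode(barrier_out, graph, groupA, n);
+ }
+ * \endcode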
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddEmptyNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies); + +/** + * \brief Creates an event record node and adds it to a graph + * + * Creates a new event record node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * Each launch of the graph will record \p event to capture execution of the + * node's dependencies. + * + * These nodes may not be used in loops or conditionals. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddEventRecordNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, cudaEvent_t event); +#endif + +/** + * \brief Returns the event associated with an event record node + * + * Returns the event of event record node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphEventRecordNodeSetEvent, + * ::cudaGraphEventWaitNodeGetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventRecordNodeGetEvent(cudaGraphNode_t node, cudaEvent_t *event_out); +#endif + +/** + * \brief Sets an event record node's event + * + * Sets the event of event record node \p hNode to \p event. 
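+ *
+ * As an illustrative sketch (the node handle below is a placeholder):
+ * \code
+ void swapRecordedEvent(cudaGraphNode_t recordNode, cudaEvent_t *newEvent_out)
+ {
+     // Create a fresh event and have the record node record it instead of its
+     // previous event.
+     cudaEventCreate(newEvent_out);
+     cudaGraphEventRecordNodeSetEvent(recordNode, *newEvent_out);
+ }
+ * \endcode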
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphEventRecordNodeGetEvent, + * ::cudaGraphEventWaitNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventRecordNodeSetEvent(cudaGraphNode_t node, cudaEvent_t event); +#endif + +/** + * \brief Creates an event wait node and adds it to a graph + * + * Creates a new event wait node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The graph node will wait for all work captured in \p event. See ::cuEventRecord() + * for details on what is captured by an event. The synchronization will be performed + * efficiently on the device when applicable. \p event may be from a different context + * or device than the launch stream. + * + * These nodes may not be used in loops or conditionals. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphAddEventWaitNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, cudaEvent_t event); +#endif + +/** + * \brief Returns the event associated with an event wait node + * + * Returns the event of event wait node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphEventWaitNodeSetEvent, + * ::cudaGraphEventRecordNodeGetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventWaitNodeGetEvent(cudaGraphNode_t node, cudaEvent_t *event_out); +#endif + +/** + * \brief Sets an event wait node's event + * + * Sets the event of event wait node \p hNode to \p event. 
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphEventWaitNodeGetEvent, + * ::cudaGraphEventRecordNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphEventWaitNodeSetEvent(cudaGraphNode_t node, cudaEvent_t event); +#endif + +/** + * \brief Creates an external semaphore signal node and adds it to a graph + * + * Creates a new external semaphore signal node and adds it to \p graph with \p + * numDependencies dependencies specified via \p dependencies and arguments specified + * in \p nodeParams. It is possible for \p numDependencies to be 0, in which case the + * node will be placed at the root of the graph. \p dependencies may not have any + * duplicate entries. A handle to the new node will be returned in \p pGraphNode. + * + * Performs a signal operation on a set of externally allocated semaphore objects + * when the node is launched. The operation(s) will occur after all of the node's + * dependencies have completed. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphExternalSemaphoresSignalNodeGetParams, + * ::cudaGraphExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddExternalSemaphoresSignalNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaExternalSemaphoreSignalNodeParams *nodeParams); +#endif + +/** + * \brief Returns an external semaphore signal node's parameters + * + * Returns the parameters of an external semaphore signal node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cudaGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. 
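+ *
+ * As an illustrative sketch (the node handle below is a placeholder):
+ * \code
+ void inspectSignalNode(cudaGraphNode_t signalNode)
+ {
+     struct cudaExternalSemaphoreSignalNodeParams p;
+     cudaGraphExternalSemaphoresSignalNodeGetParams(signalNode, &p);
+     // p.extSemArray and p.paramsArray are owned by the node: read p.numExtSems
+     // and the array contents, but do not modify or free them.
+ }
+ * \endcode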
+ *
+ * \param hNode - Node to get the parameters for
+ * \param params_out - Pointer to return the parameters
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaLaunchKernel,
+ * ::cudaGraphAddExternalSemaphoresSignalNode,
+ * ::cudaGraphExternalSemaphoresSignalNodeSetParams,
+ * ::cudaGraphAddExternalSemaphoresWaitNode,
+ * ::cudaSignalExternalSemaphoresAsync,
+ * ::cudaWaitExternalSemaphoresAsync
+ */
+#if __CUDART_API_VERSION >= 11020
+extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresSignalNodeGetParams(cudaGraphNode_t hNode, struct cudaExternalSemaphoreSignalNodeParams *params_out);
+#endif
+
+/**
+ * \brief Sets an external semaphore signal node's parameters
+ *
+ * Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams.
+ *
+ * \param hNode - Node to set the parameters for
+ * \param nodeParams - Parameters to copy
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphAddExternalSemaphoresSignalNode,
+ * ::cudaGraphExternalSemaphoresSignalNodeGetParams,
+ * ::cudaGraphAddExternalSemaphoresWaitNode,
+ * ::cudaSignalExternalSemaphoresAsync,
+ * ::cudaWaitExternalSemaphoresAsync
+ */
+#if __CUDART_API_VERSION >= 11020
+extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresSignalNodeSetParams(cudaGraphNode_t hNode, const struct cudaExternalSemaphoreSignalNodeParams *nodeParams);
+#endif
+
+/**
+ * \brief Creates an external semaphore wait node and adds it to a graph
+ *
+ * Creates a new external semaphore wait node and adds it to \p graph with \p numDependencies
+ * dependencies specified via \p pDependencies and arguments specified in \p nodeParams.
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed
+ * at the root of the graph. \p pDependencies may not have any duplicate entries. A handle
+ * to the new node will be returned in \p pGraphNode.
+ *
+ * Performs a wait operation on a set of externally allocated semaphore objects
+ * when the node is launched. Nodes that depend on this node will not be launched
+ * until the wait operation has completed.
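+ *
+ * As an illustrative sketch (the semaphore handle \p extSem is assumed to come from a
+ * prior ::cudaImportExternalSemaphore call; the other names are placeholders):
+ * \code
+ void addWaitNode(cudaGraph_t graph, cudaExternalSemaphore_t extSem,
+                  cudaGraphNode_t dep, cudaGraphNode_t *waitNode_out)
+ {
+     struct cudaExternalSemaphoreWaitParams waitParams = {0};
+     waitParams.params.fence.value = 1;          // fence value the wait completes on
+
+     struct cudaExternalSemaphoreWaitNodeParams np = {0};
+     np.extSemArray = &extSem;
+     np.paramsArray = &waitParams;
+     np.numExtSems  = 1;
+
+     // The node copies these parameters, so the locals need not outlive the call.
+     cudaGraphAddExternalSemaphoresWaitNode(waitNode_out, graph, &dep, 1, &np);
+ }
+ * \endcode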
+ *
+ * \param pGraphNode - Returns newly created node
+ * \param graph - Graph to which to add the node
+ * \param pDependencies - Dependencies of the node
+ * \param numDependencies - Number of dependencies
+ * \param nodeParams - Parameters for the node
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphExternalSemaphoresWaitNodeGetParams,
+ * ::cudaGraphExternalSemaphoresWaitNodeSetParams,
+ * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams,
+ * ::cudaGraphAddExternalSemaphoresSignalNode,
+ * ::cudaImportExternalSemaphore,
+ * ::cudaSignalExternalSemaphoresAsync,
+ * ::cudaWaitExternalSemaphoresAsync,
+ * ::cudaGraphCreate,
+ * ::cudaGraphDestroyNode,
+ * ::cudaGraphAddEventRecordNode,
+ * ::cudaGraphAddEventWaitNode,
+ * ::cudaGraphAddChildGraphNode,
+ * ::cudaGraphAddEmptyNode,
+ * ::cudaGraphAddKernelNode,
+ * ::cudaGraphAddMemcpyNode,
+ * ::cudaGraphAddMemsetNode
+ */
+#if __CUDART_API_VERSION >= 11020
+extern __host__ cudaError_t CUDARTAPI cudaGraphAddExternalSemaphoresWaitNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, const struct cudaExternalSemaphoreWaitNodeParams *nodeParams);
+#endif
+
+/**
+ * \brief Returns an external semaphore wait node's parameters
+ *
+ * Returns the parameters of an external semaphore wait node \p hNode in \p params_out.
+ * The \p extSemArray and \p paramsArray returned in \p params_out are owned by the
+ * node. This memory remains valid until the node is destroyed or its parameters are
+ * modified, and should not be modified directly. Use
+ * ::cudaGraphExternalSemaphoresWaitNodeSetParams to update the parameters of this node.
+ *
+ * \param hNode - Node to get the parameters for
+ * \param params_out - Pointer to return the parameters
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaLaunchKernel,
+ * ::cudaGraphAddExternalSemaphoresWaitNode,
+ * ::cudaGraphExternalSemaphoresWaitNodeSetParams,
+ * ::cudaSignalExternalSemaphoresAsync,
+ * ::cudaWaitExternalSemaphoresAsync
+ */
+#if __CUDART_API_VERSION >= 11020
+extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresWaitNodeGetParams(cudaGraphNode_t hNode, struct cudaExternalSemaphoreWaitNodeParams *params_out);
+#endif
+
+/**
+ * \brief Sets an external semaphore wait node's parameters
+ *
+ * Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams.
+ * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExternalSemaphoresWaitNodeSetParams(cudaGraphNode_t hNode, const struct cudaExternalSemaphoreWaitNodeParams *nodeParams); +#endif + +/** + * \brief Creates an allocation node and adds it to a graph + * + * Creates a new allocation node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p pGraphNode. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * When ::cudaGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in + * \p nodeParams.dptr. The allocation's address remains fixed across instantiations and launches. + * + * If the allocation is freed in the same graph, by creating a free node using ::cudaGraphAddMemFreeNode, + * the allocation can be accessed by nodes ordered after the allocation node but before the free node. + * These allocations cannot be freed outside the owning graph, and they can only be freed once in the + * owning graph. + * + * If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the + * graph which are ordered after the allocation node, but also by stream operations ordered after the + * graph's execution but before the allocation is freed. + * + * Allocations which are not freed in the same graph can be freed by: + * - passing the allocation to ::cudaMemFreeAsync or ::cudaMemFree; + * - launching a graph with a free node for that allocation; or + * - specifying ::cudaGraphInstantiateFlagAutoFreeOnLaunch during instantiation, which makes + * each launch behave as though it called ::cudaMemFreeAsync for every unfreed allocation. + * + * It is not possible to free an allocation in both the owning graph and another graph. If the allocation + * is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed + * in another graph, a free node can no longer be added to the owning graph. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. 
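+ *
+ * As an illustrative sketch (device 0 and the byte count are placeholder choices):
+ * \code
+ void *addAllocation(cudaGraph_t graph, size_t bytes, cudaGraphNode_t *allocNode_out)
+ {
+     struct cudaMemAllocNodeParams p = {0};
+     p.poolProps.allocType     = cudaMemAllocationTypePinned;
+     p.poolProps.location.type = cudaMemLocationTypeDevice;
+     p.poolProps.location.id   = 0;          // device ordinal owning the allocation
+     p.bytesize                = bytes;
+     cudaGraphAddMemAllocNode(allocNode_out, graph, NULL, 0, &p);
+     return p.dptr;                          // fixed across instantiations and launches
+ }
+ * \endcode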
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorCudartUnloading, + * ::cudaErrorInitializationError, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cudaGraphAddMemFreeNode, + * ::cudaGraphMemAllocNodeGetParams, + * ::cudaDeviceGraphMemTrim, + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaMallocAsync, + * ::cudaFreeAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemAllocNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, struct cudaMemAllocNodeParams *nodeParams); +#endif + +/** + * \brief Returns a memory alloc node's parameters + * + * Returns the parameters of a memory alloc node \p hNode in \p params_out. + * The \p poolProps and \p accessDescs returned in \p params_out, are owned by the + * node. This memory remains valid until the node is destroyed. The returned + * parameters must not be modified. + * + * \param node - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphMemFreeNodeGetParams + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphMemAllocNodeGetParams(cudaGraphNode_t node, struct cudaMemAllocNodeParams *params_out); +#endif + +/** + * \brief Creates a memory free node and adds it to a graph + * + * Creates a new memory free node and adds it to \p graph with \p numDependencies + * dependencies specified via \p pDependencies and address specified in \p dptr. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p pGraphNode. + * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dptr - Address of memory to free + * + * ::cudaGraphAddMemFreeNode will return ::cudaErrorInvalidValue if the user attempts to free: + * - an allocation twice in the same graph. + * - an address that was not returned by an allocation node. + * - an invalid address. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. 
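+ *
+ * As an illustrative sketch, freeing an address obtained from an allocation node in the
+ * same graph (\p allocNode and \p dptr are assumed to come from ::cudaGraphAddMemAllocNode):
+ * \code
+ void addFree(cudaGraph_t graph, cudaGraphNode_t allocNode, void *dptr,
+              cudaGraphNode_t *freeNode_out)
+ {
+     // The free node depends on the allocation node so the free is ordered after it.
+     cudaGraphAddMemFreeNode(freeNode_out, graph, &allocNode, 1, dptr);
+ }
+ * \endcode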
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorCudartUnloading, + * ::cudaErrorInitializationError, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue, + * ::cudaErrorOutOfMemory + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphMemFreeNodeGetParams, + * ::cudaDeviceGraphMemTrim, + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaMallocAsync, + * ::cudaFreeAsync, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphAddMemFreeNode(cudaGraphNode_t *pGraphNode, cudaGraph_t graph, const cudaGraphNode_t *pDependencies, size_t numDependencies, void *dptr); +#endif + +/** + * \brief Returns a memory free node's parameters + * + * Returns the address of a memory free node \p hNode in \p dptr_out. + * + * \param node - Node to get the parameters for + * \param dptr_out - Pointer to return the device address + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemFreeNode, + * ::cudaGraphMemFreeNodeGetParams + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphMemFreeNodeGetParams(cudaGraphNode_t node, void *dptr_out); +#endif + +/** + * \brief Free unused memory that was cached on the specified device for use with graphs back to the OS. + * + * Blocks which are not in use by a graph that is either currently executing or scheduled to execute are + * freed back to the operating system. + * + * \param device - The device for which cached memory should be freed. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphAddMemFreeNode, + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaMallocAsync, + * ::cudaFreeAsync + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaDeviceGraphMemTrim(int device); +#endif + +/** + * \brief Query asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::cudaGraphMemAttrUsedMemCurrent: Amount of memory, in bytes, currently associated with graphs + * - ::cudaGraphMemAttrUsedMemHigh: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + * - ::cudaGraphMemAttrReservedMemCurrent: Amount of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * - ::cudaGraphMemAttrReservedMemHigh: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. 
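+ *
+ * As an illustrative sketch (device 0 is a placeholder; the attribute value is assumed
+ * to be a 64-bit byte count):
+ * \code
+ unsigned long long queryGraphMemUse(void)
+ {
+     unsigned long long usedBytes = 0;
+     cudaDeviceGetGraphMemAttribute(0, cudaGraphMemAttrUsedMemCurrent, &usedBytes);
+     // usedBytes now holds the amount of memory currently associated with graphs.
+     return usedBytes;
+ }
+ * \endcode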
+ * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - retrieved value + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceSetGraphMemAttribute, + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphAddMemFreeNode, + * ::cudaDeviceGraphMemTrim, + * ::cudaMallocAsync, + * ::cudaFreeAsync + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaDeviceGetGraphMemAttribute(int device, enum cudaGraphMemAttributeType attr, void* value); +#endif + +/** + * \brief Set asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::cudaGraphMemAttrUsedMemHigh: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + * - ::cudaGraphMemAttrReservedMemHigh: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - pointer to value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaDeviceGetGraphMemAttribute, + * ::cudaGraphAddMemAllocNode, + * ::cudaGraphAddMemFreeNode, + * ::cudaDeviceGraphMemTrim, + * ::cudaMallocAsync, + * ::cudaFreeAsync + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaDeviceSetGraphMemAttribute(int device, enum cudaGraphMemAttributeType attr, void* value); +#endif + +/** + * \brief Clones a graph + * + * This function creates a copy of \p originalGraph and returns it in \p pGraphClone. + * All parameters are copied into the cloned graph. The original graph may be modified + * after this call without affecting the clone. + * + * Child graph nodes in the original graph are recursively copied into the clone. + * + * \param pGraphClone - Returns newly created cloned graph + * \param originalGraph - Graph to clone + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorMemoryAllocation + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphNodeFindInClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphClone(cudaGraph_t *pGraphClone, cudaGraph_t originalGraph); + +/** + * \brief Finds a cloned version of a node + * + * This function returns the node in \p clonedGraph corresponding to \p originalNode + * in the original graph. + * + * \p clonedGraph must have been cloned from \p originalGraph via ::cudaGraphClone. + * \p originalNode must have been in \p originalGraph at the time of the call to + * ::cudaGraphClone, and the corresponding cloned node in \p clonedGraph must not have + * been removed. The cloned node is then returned via \p pClonedNode. 
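+ *
+ * As an illustrative sketch (the original graph and node handles are placeholders):
+ * \code
+ void cloneAndFind(cudaGraph_t graph, cudaGraphNode_t originalNode,
+                   cudaGraph_t *clone_out, cudaGraphNode_t *clonedNode_out)
+ {
+     // Clone the graph, then look up the counterpart of a node from the original.
+     cudaGraphClone(clone_out, graph);
+     cudaGraphNodeFindInClone(clonedNode_out, originalNode, *clone_out);
+ }
+ * \endcode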
+ * + * \param pNode - Returns handle to the cloned node + * \param originalNode - Handle to the original node + * \param clonedGraph - Cloned graph to query + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphClone + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeFindInClone(cudaGraphNode_t *pNode, cudaGraphNode_t originalNode, cudaGraph_t clonedGraph); + +/** + * \brief Returns a node's type + * + * Returns the node type of \p node in \p pType. + * + * \param node - Node to query + * \param pType - Pointer to return the node type + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphChildGraphNodeGetGraph, + * ::cudaGraphKernelNodeGetParams, + * ::cudaGraphKernelNodeSetParams, + * ::cudaGraphHostNodeGetParams, + * ::cudaGraphHostNodeSetParams, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemsetNodeGetParams, + * ::cudaGraphMemsetNodeSetParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetType(cudaGraphNode_t node, enum cudaGraphNodeType *pType); + +/** + * \brief Returns a graph's nodes + * + * Returns a list of \p graph's nodes. \p nodes may be NULL, in which case this + * function will return the number of nodes in \p numNodes. Otherwise, + * \p numNodes entries will be filled in. If \p numNodes is higher than the actual + * number of nodes, the remaining entries in \p nodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numNodes. + * + * \param graph - Graph to query + * \param nodes - Pointer to return the nodes + * \param numNodes - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetType, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphGetNodes(cudaGraph_t graph, cudaGraphNode_t *nodes, size_t *numNodes); + +/** + * \brief Returns a graph's root nodes + * + * Returns a list of \p graph's root nodes. \p pRootNodes may be NULL, in which case this + * function will return the number of root nodes in \p pNumRootNodes. Otherwise, + * \p pNumRootNodes entries will be filled in. If \p pNumRootNodes is higher than the actual + * number of root nodes, the remaining entries in \p pRootNodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p pNumRootNodes. + * + * \param graph - Graph to query + * \param pRootNodes - Pointer to return the root nodes + * \param pNumRootNodes - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphGetNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetType, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphGetRootNodes(cudaGraph_t graph, cudaGraphNode_t *pRootNodes, size_t *pNumRootNodes); + +/** + * \brief Returns a graph's dependency edges + * + * Returns a list of \p graph's dependency edges. 
Edges are returned via corresponding + * indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the + * node in \p from[i]. \p from and \p to may both be NULL, in which + * case this function only returns the number of edges in \p numEdges. Otherwise, + * \p numEdges entries will be filled in. If \p numEdges is higher than the actual + * number of edges, the remaining entries in \p from and \p to will be set to NULL, and + * the number of edges actually returned will be written to \p numEdges. + * + * \param graph - Graph to get the edges from + * \param from - Location to return edge endpoints + * \param to - Location to return edge endpoints + * \param numEdges - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphAddDependencies, + * ::cudaGraphRemoveDependencies, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphGetEdges(cudaGraph_t graph, cudaGraphNode_t *from, cudaGraphNode_t *to, size_t *numEdges); + +/** + * \brief Returns a node's dependencies + * + * Returns a list of \p node's dependencies. \p pDependencies may be NULL, in which case this + * function will return the number of dependencies in \p pNumDependencies. Otherwise, + * \p pNumDependencies entries will be filled in. If \p pNumDependencies is higher than the actual + * number of dependencies, the remaining entries in \p pDependencies will be set to NULL, and the + * number of nodes actually obtained will be returned in \p pNumDependencies. + * + * \param node - Node to query + * \param pDependencies - Pointer to return the dependencies + * \param pNumDependencies - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeGetDependentNodes, + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphAddDependencies, + * ::cudaGraphRemoveDependencies + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetDependencies(cudaGraphNode_t node, cudaGraphNode_t *pDependencies, size_t *pNumDependencies); + +/** + * \brief Returns a node's dependent nodes + * + * Returns a list of \p node's dependent nodes. \p pDependentNodes may be NULL, in which + * case this function will return the number of dependent nodes in \p pNumDependentNodes. + * Otherwise, \p pNumDependentNodes entries will be filled in. If \p pNumDependentNodes is + * higher than the actual number of dependent nodes, the remaining entries in + * \p pDependentNodes will be set to NULL, and the number of nodes actually obtained will + * be returned in \p pNumDependentNodes. 
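+ *
+ * As an illustrative sketch of the usual two-call pattern (error handling omitted):
+ * \code
+ cudaGraphNode_t *queryDependents(cudaGraphNode_t node, size_t *count_out)
+ {
+     // First call with a NULL array to obtain the count, then fill the array.
+     cudaGraphNodeGetDependentNodes(node, NULL, count_out);
+     cudaGraphNode_t *deps = (cudaGraphNode_t *)malloc(*count_out * sizeof(cudaGraphNode_t));
+     cudaGraphNodeGetDependentNodes(node, deps, count_out);
+     return deps;  // caller frees
+ }
+ * \endcode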
+ * + * \param node - Node to query + * \param pDependentNodes - Pointer to return the dependent nodes + * \param pNumDependentNodes - See description + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphGetNodes, + * ::cudaGraphGetRootNodes, + * ::cudaGraphGetEdges, + * ::cudaGraphAddDependencies, + * ::cudaGraphRemoveDependencies + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetDependentNodes(cudaGraphNode_t node, cudaGraphNode_t *pDependentNodes, size_t *pNumDependentNodes); + +/** + * \brief Adds dependency edges to a graph. + * + * The number of dependencies to be added is defined by \p numDependencies + * Elements in \p pFrom and \p pTo at corresponding indices define a dependency. + * Each node in \p pFrom and \p pTo must belong to \p graph. + * + * If \p numDependencies is 0, elements in \p pFrom and \p pTo will be ignored. + * Specifying an existing dependency will return an error. + * + * \param graph - Graph to which dependencies are added + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be added + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphRemoveDependencies, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphAddDependencies(cudaGraph_t graph, const cudaGraphNode_t *from, const cudaGraphNode_t *to, size_t numDependencies); + +/** + * \brief Removes dependency edges from a graph. + * + * The number of \p pDependencies to be removed is defined by \p numDependencies. + * Elements in \p pFrom and \p pTo at corresponding indices define a dependency. + * Each node in \p pFrom and \p pTo must belong to \p graph. + * + * If \p numDependencies is 0, elements in \p pFrom and \p pTo will be ignored. + * Specifying a non-existing dependency will return an error. + * + * \param graph - Graph from which to remove dependencies + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be removed + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddDependencies, + * ::cudaGraphGetEdges, + * ::cudaGraphNodeGetDependencies, + * ::cudaGraphNodeGetDependentNodes + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphRemoveDependencies(cudaGraph_t graph, const cudaGraphNode_t *from, const cudaGraphNode_t *to, size_t numDependencies); + +/** + * \brief Remove a node from the graph + * + * Removes \p node from its graph. This operation also severs any dependencies of other nodes + * on \p node and vice versa. + * + * Dependencies cannot be removed from graphs which contain allocation or free nodes. + * Any attempt to do so will return an error. 
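+ *
+ * As an illustrative sketch (nodes \p a and \p b are placeholders belonging to \p graph):
+ * \code
+ void linkThenRemove(cudaGraph_t graph, cudaGraphNode_t a, cudaGraphNode_t b)
+ {
+     // Add the edge a -> b, then remove b; the edge is severed along with the node.
+     cudaGraphAddDependencies(graph, &a, &b, 1);
+     cudaGraphDestroyNode(b);
+ }
+ * \endcode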
+ * + * \param node - Node to remove + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemsetNode + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphDestroyNode(cudaGraphNode_t node); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * The \p flags parameter controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::cudaGraphInstantiateFlagDeviceLaunch, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag cannot be used in + * conjunction with ::cudaGraphInstantiateFlagAutoFreeOnLaunch. + * + * - ::cudaGraphInstantiateFlagUseNodePriority, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p graph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to + * instantiate a second executable graph before destroying the first with + * ::cudaGraphExecDestroy will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single device. + * + * - The graph can only contain kernel nodes. Furthermore, use of CUDA Dynamic Parallelism + * is not permitted. Cooperative launches are permitted as long as MPS is not in use. + * + * If \p graph is not instantiated for launch on the device but contains kernels which + * call device-side cudaGraphLaunch() from multiple devices, this will result in an error. + * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiate(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, unsigned long long flags __dv(0)); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. 
If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * The \p flags parameter controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::cudaGraphInstantiateFlagDeviceLaunch, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. This flag cannot be used in + * conjunction with ::cudaGraphInstantiateFlagAutoFreeOnLaunch. + * + * - ::cudaGraphInstantiateFlagUseNodePriority, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p graph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to + * instantiate a second executable graph before destroying the first with + * ::cudaGraphExecDestroy will result in an error. + * + * If \p graph contains kernels which call device-side cudaGraphLaunch() from multiple + * devices, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single device. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current device, and the current device must + * match the device of other nodes in the graph. + * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +#if __CUDART_API_VERSION >= 11040 +extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiateWithFlags(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, unsigned long long flags __dv(0)); +#endif + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph according to the \p instantiateParams structure. + * The graph is validated for any structural constraints or intra-node constraints + * which were not previously validated. If instantiation is successful, a handle to + * the instantiated graph is returned in \p pGraphExec. 
+ * + * \p instantiateParams controls the behavior of instantiation and subsequent + * graph launches, as well as returning more detailed information in the event of an error. + * ::cudaGraphInstantiateParams is defined as: + * + * \code + typedef struct { + unsigned long long flags; + cudaStream_t uploadStream; + cudaGraphNode_t errNode_out; + cudaGraphInstantiateResult result_out; + } cudaGraphInstantiateParams; + * \endcode + * + * The \p flags field controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::cudaGraphInstantiateFlagUpload, which will perform an upload of the graph + * into \p uploadStream once the graph has been instantiated. + * + * - ::cudaGraphInstantiateFlagDeviceLaunch, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. This flag cannot be used in + * conjunction with ::cudaGraphInstantiateFlagAutoFreeOnLaunch. + * + * - ::cudaGraphInstantiateFlagUseNodePriority, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p graph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to instantiate a + * second executable graph before destroying the first with ::cudaGraphExecDestroy will + * result in an error. + * + * If \p graph contains kernels which call device-side cudaGraphLaunch() from multiple + * devices, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single device. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current device, and the current device must + * match the device of other nodes in the graph. + * + * In the event of an error, the \p result_out and \p errNode_out fields will contain more + * information about the nature of the error. Possible error reporting includes: + * + * - ::cudaGraphInstantiateError, if passed an invalid value or if an unexpected error occurred + * which is described by the return value of the function. \p errNode_out will be set to NULL. + * - ::cudaGraphInstantiateInvalidStructure, if the graph structure is invalid. \p errNode_out + * will be set to one of the offending nodes. 
+ * - ::cudaGraphInstantiateNodeOperationNotSupported, if the graph is instantiated for device + * launch but contains a node of an unsupported node type, or a node which performs unsupported + * operations, such as use of CUDA dynamic parallelism within a kernel node. \p errNode_out will + * be set to this node. + * - ::cudaGraphInstantiateMultipleDevicesNotSupported, if the graph is instantiated for device + * launch but a node’s device differs from that of another node. This error can also be returned + * if a graph is not instantiated for device launch and it contains kernels which call device-side + * cudaGraphLaunch() from multiple devices. \p errNode_out will be set to this node. + * + * If instantiation is successful, \p result_out will be set to ::cudaGraphInstantiateSuccess, + * and \p hErrNode_out will be set to NULL. + * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param instantiateParams - Instantiation parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphCreate, + * ::cudaGraphInstantiate, + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphExecDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiateWithParams(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, cudaGraphInstantiateParams *instantiateParams); + +/** + * \brief Query the instantiation flags of an executable graph + * + * Returns the flags that were passed to instantiation for the given executable graph. + * ::cudaGraphInstantiateFlagUpload will not be returned by this API as it does + * not affect the resulting executable graph. + * + * \param graphExec - The executable graph to query + * \param flags - Returns the instantiation flags + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphInstantiateWithParams + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecGetFlags(cudaGraphExec_t graphExec, unsigned long long *flags); + +/** + * \brief Sets the parameters for a kernel node in the given graphExec + * + * Sets the parameters of a kernel node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p node in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. All \p nodeParams + * fields may change, but the following restrictions apply to \p func updates: + * + * - The owning device of the function cannot change. + * - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + * to a function which uses CDP + * - If \p hGraphExec was not instantiated for device launch, a node whose function originally + * did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + * device-side cudaGraphLaunch() unless the node resides on the same device as nodes which + * contained such calls at instantiate-time. If no such calls were present at instantiation, + * these updates cannot be performed at all. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p node is also not modified by this call. 
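+ *
+ * As an illustrative sketch (the executable graph, the kernel node and the new grid
+ * size are placeholders):
+ * \code
+ void resizeKernelLaunch(cudaGraphExec_t graphExec, cudaGraphNode_t kernelNode,
+                         unsigned int newGridDimX)
+ {
+     // Start from the node's current parameters and change only the grid size.
+     struct cudaKernelNodeParams p;
+     cudaGraphKernelNodeGetParams(kernelNode, &p);
+     p.gridDim.x = newGridDimX;
+     cudaGraphExecKernelNodeSetParams(graphExec, kernelNode, &p);
+ }
+ * \endcode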
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - kernel node from the graph from which graphExec was instantiated + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddKernelNode, + * ::cudaGraphKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecKernelNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaKernelNodeParams *pNodeParams); + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec. + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained \p pNodeParams at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * The source and destination memory in \p pNodeParams must be allocated from the same + * contexts as the original source and destination memory. Both the instantiation-time + * memory operands and the memory operands in \p pNodeParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * either the original or new memory operands are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecMemcpyNodeSetParams1D, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaMemcpy3DParms *pNodeParams); + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. 
Changed edges to and from \p node are ignored. + * + * \p src and \p symbol must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParamsToSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + const void* symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p symbol and \p dst must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
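+ *
+ * For illustration, a minimal sketch assuming a __device__ array \c devData, a memcpy node
+ * \c cpyNode created with ::cudaGraphAddMemcpyNodeFromSymbol, an executable graph \c graphExec
+ * instantiated from that graph, and a host buffer \c hostBuf of at least \c N floats:
+ * \code
+ // Hypothetical example: retarget the copy to a different host destination of the same size.
+ cudaError_t err = cudaGraphExecMemcpyNodeSetParamsFromSymbol(
+         graphExec, cpyNode,
+         hostBuf,                     // new destination (same context as the original)
+         devData,                     // unchanged device symbol
+         N * sizeof(float),           // byte count
+         0,                           // offset into the symbol
+         cudaMemcpyDeviceToHost);
+ * \endcode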
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParamsFromSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const void* symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to perform a 1-dimensional copy + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p src and \p dst must be allocated from the same contexts as the original source + * and destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
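+ *
+ * For illustration, a minimal sketch assuming a 1D memcpy node \c cpyNode created with
+ * ::cudaGraphAddMemcpyNode1D, an executable graph \c graphExec instantiated from that graph,
+ * and device buffers \c dSrc and \c dDst of at least \c bytes bytes in the original contexts:
+ * \code
+ // Hypothetical example: point the 1D copy at new operands of the same size.
+ cudaError_t err = cudaGraphExecMemcpyNodeSetParams1D(graphExec, cpyNode,
+                                                      dDst, dSrc, bytes,
+                                                      cudaMemcpyDeviceToDevice);
+ if (err != cudaSuccess) {
+     // e.g. the operand mappings changed, or the original operands were not 1-dimensional
+ }
+ * \endcode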
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNode1D, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParams1D, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemcpyNodeSetParams1D( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const void* src, + size_t count, + enum cudaMemcpyKind kind); +#endif + +/** + * \brief Sets the parameters for a memset node in the given graphExec. + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained \p pNodeParams at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * The destination memory in \p pNodeParams must be allocated from the same + * context as the original destination memory. Both the instantiation-time + * memory operand and the memory operand in \p pNodeParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns cudaErrorInvalidValue if the memory operand's mappings changed or + * either the original or new memory operand are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memset node from the graph which was used to instantiate graphExec + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemsetNode, + * ::cudaGraphMemsetNodeSetParams, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecMemsetNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaMemsetParams *pNodeParams); + +/** + * \brief Sets the parameters for a host node in the given graphExec. 
+ * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained \p pNodeParams at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Host node from the graph which was used to instantiate graphExec + * \param pNodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddHostNode, + * ::cudaGraphHostNodeSetParams, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecHostNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const struct cudaHostNodeParams *pNodeParams); + +/** + * \brief Updates node parameters in the child graph node in the given graphExec. + * + * Updates the work represented by \p node in \p hGraphExec as though the nodes contained + * in \p node's graph had the parameters contained in \p childGraph's nodes at instantiation. + * \p node must remain in the graph which was used to instantiate \p hGraphExec. + * Changed edges to and from \p node are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * The topology of \p childGraph, as well as the node insertion order, must match that + * of the graph contained in \p node. See ::cudaGraphExecUpdate() for a list of restrictions + * on what can be updated in an instantiated graph. The update is recursive, so child graph + * nodes contained within the top level child graph will also be updated. 
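+ *
+ * For illustration, a minimal sketch assuming a child graph node \c childNode in the graph from
+ * which \c graphExec was instantiated, a stream \c stream, and a graph \c updatedChild whose
+ * topology and node insertion order match the original child graph:
+ * \code
+ // Hypothetical example: apply new parameters to every node of the embedded child graph.
+ if (cudaGraphExecChildGraphNodeSetParams(graphExec, childNode, updatedChild) == cudaSuccess) {
+     cudaGraphLaunch(graphExec, stream);   // future launches use the updated parameters
+ }
+ * \endcode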
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Child graph node from the graph from which graphExec was instantiated + * \param childGraph - The graph supplying the updated parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphChildGraphNodeGetGraph, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecChildGraphNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, cudaGraph_t childGraph); +#endif + +/** + * \brief Sets the event for an event record node in the given graphExec + * + * Sets the event of an event record node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Event record node from the graph from which graphExec was instantiated + * \param event - Updated event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventRecordNode, + * ::cudaGraphEventRecordNodeGetEvent, + * ::cudaGraphEventWaitNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecEventRecordNodeSetEvent(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, cudaEvent_t event); +#endif + +/** + * \brief Sets the event for an event wait node in the given graphExec + * + * Sets the event of an event wait node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call.
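+ *
+ * For illustration, a minimal sketch assuming an event wait node \c waitNode in the graph from
+ * which \c graphExec was instantiated:
+ * \code
+ // Hypothetical example: make future launches wait on a different event.
+ cudaEvent_t newEvent;
+ cudaEventCreate(&newEvent);
+ cudaGraphExecEventWaitNodeSetEvent(graphExec, waitNode, newEvent);
+ * \endcode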
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Event wait node from the graph from which graphExec was instantiated + * \param event - Updated event to use + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddEventWaitNode, + * ::cudaGraphEventWaitNodeGetEvent, + * ::cudaGraphEventRecordNodeSetEvent, + * ::cudaEventRecordWithFlags, + * ::cudaStreamWaitEvent, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphExecEventWaitNodeSetEvent(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, cudaEvent_t event); +#endif + +/** + * \brief Sets the parameters for an external semaphore signal node in the given graphExec + * + * Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore signal node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresSignalNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExecExternalSemaphoresSignalNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, const struct cudaExternalSemaphoreSignalNodeParams *nodeParams); +#endif + +/** + * \brief Sets the parameters for an external semaphore wait node in the given graphExec + * + * Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. 
Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore wait node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddExternalSemaphoresWaitNode, + * ::cudaImportExternalSemaphore, + * ::cudaSignalExternalSemaphoresAsync, + * ::cudaWaitExternalSemaphoresAsync, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams, + * ::cudaGraphExecChildGraphNodeSetParams, + * ::cudaGraphExecEventRecordNodeSetEvent, + * ::cudaGraphExecEventWaitNodeSetEvent, + * ::cudaGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + */ +#if __CUDART_API_VERSION >= 11020 +extern __host__ cudaError_t CUDARTAPI cudaGraphExecExternalSemaphoresWaitNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, const struct cudaExternalSemaphoreWaitNodeParams *nodeParams); +#endif + +/** + * \brief Enables or disables the specified node in the given graphExec + * + * Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent + * to empty nodes until they are reenabled. Existing node parameters are not affected by + * disabling/enabling the node. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \note Currently only kernel, memset and memcpy nodes are supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Node is enabled if != 0, otherwise the node is disabled + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeGetEnabled, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + * ::cudaGraphLaunch + */ +#if __CUDART_API_VERSION >= 11060 +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeSetEnabled(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, unsigned int isEnabled); +#endif + +/** + * \brief Query whether a node in the given graphExec is enabled + * + * Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * \note Currently only kernel, memset and memcpy nodes are supported. 
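+ *
+ * For illustration, a minimal sketch assuming a kernel node \c kNode in the graph from which
+ * \c graphExec was instantiated and a stream \c stream:
+ * \code
+ // Hypothetical example: skip the node for one launch, then restore its previous state.
+ unsigned int wasEnabled = 0;
+ cudaGraphNodeGetEnabled(graphExec, kNode, &wasEnabled);   // query the current state
+ cudaGraphNodeSetEnabled(graphExec, kNode, 0);             // node now behaves like an empty node
+ cudaGraphLaunch(graphExec, stream);
+ cudaGraphNodeSetEnabled(graphExec, kNode, wasEnabled);    // restore for later launches
+ * \endcode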
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Location to return the enabled status of the node + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphNodeSetEnabled, + * ::cudaGraphExecUpdate, + * ::cudaGraphInstantiate + * ::cudaGraphLaunch + */ +#if __CUDART_API_VERSION >= 11060 +extern __host__ cudaError_t CUDARTAPI cudaGraphNodeGetEnabled(cudaGraphExec_t hGraphExec, cudaGraphNode_t hNode, unsigned int *isEnabled); +#endif + +/** + * \brief Check whether an executable graph can be updated with a graph and perform the update if possible + * + * Updates the node parameters in the instantiated graph specified by \p hGraphExec with the + * node parameters in a topologically identical graph specified by \p hGraph. + * + * Limitations: + * + * - Kernel nodes: + * - The owning context of the function cannot change. + * - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + * to a function which uses CDP. + * - A cooperative node cannot be updated to a non-cooperative node, and vice-versa. + * - If the graph was instantiated with cudaGraphInstantiateFlagUseNodePriority, the + * priority attribute cannot change. Equality is checked on the originally requested + * priority values, before they are clamped to the device's supported range. + * - If \p hGraphExec was not instantiated for device launch, a node whose function originally + * did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + * device-side cudaGraphLaunch() unless the node resides on the same device as nodes which + * contained such calls at instantiate-time. If no such calls were present at instantiation, + * these updates cannot be performed at all. + * - Memset and memcpy nodes: + * - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change. + * - The source/destination memory must be allocated from the same contexts as the original + * source/destination memory. + * - Only 1D memsets can be changed. + * - Additional memcpy node restrictions: + * - Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE, + * CU_MEMORYTYPE_ARRAY, etc.) is not supported. + * + * Note: The API may add further restrictions in future releases. The return code should always be checked. + * + * cudaGraphExecUpdate sets the result member of \p resultInfo to cudaGraphExecUpdateErrorTopologyChanged + * under the following conditions: + * - The count of nodes directly in \p hGraphExec and \p hGraph differ, in which case resultInfo->errorNode + * is set to NULL. + * - \p hGraph has more exit nodes than \p hGraphExec, in which case resultInfo->errorNode is set to one of + * the exit nodes in hGraph. + * - A node in \p hGraph has a different number of dependencies than the node from \p hGraphExec it is paired with, + * in which case resultInfo->errorNode is set to the node from \p hGraph. + * - A node in \p hGraph has a dependency that does not match with the corresponding dependency of the paired node + * from \p hGraphExec. resultInfo->errorNode will be set to the node from \p hGraph. resultInfo->errorFromNode + * will be set to the mismatched dependency.
The dependencies are paired based on edge order and a dependency + * does not match when the nodes are already paired based on other edges examined in the graph. + * + * cudaGraphExecUpdate sets the result member of \p resultInfo to: + * - cudaGraphExecUpdateError if passed an invalid value. + * - cudaGraphExecUpdateErrorTopologyChanged if the graph topology changed + * - cudaGraphExecUpdateErrorNodeTypeChanged if the type of a node changed, in which case + * resultInfo->errorNode is set to the node from \p hGraph. + * - cudaGraphExecUpdateErrorFunctionChanged if the function of a kernel node changed (CUDA driver < 11.2) + * - cudaGraphExecUpdateErrorUnsupportedFunctionChange if the func field of a kernel changed in an + * unsupported way (see note above), in which case resultInfo->errorNode is set to the node from \p hGraph + * - cudaGraphExecUpdateErrorParametersChanged if any parameters to a node changed in a way + * that is not supported, in which case resultInfo->errorNode is set to the node from \p hGraph + * - cudaGraphExecUpdateErrorAttributesChanged if any attributes of a node changed in a way + * that is not supported, in which case resultInfo->errorNode is set to the node from \p hGraph + * - cudaGraphExecUpdateErrorNotSupported if something about a node is unsupported, like + * the node's type or configuration, in which case resultInfo->errorNode is set to the node from \p hGraph + * + * If the update fails for a reason not listed above, the result member of \p resultInfo will be set + * to cudaGraphExecUpdateError. If the update succeeds, the result member will be set to cudaGraphExecUpdateSuccess. + * + * cudaGraphExecUpdate returns cudaSuccess when the update was performed successfully. It returns + * cudaErrorGraphExecUpdateFailure if the graph update was not performed because it included + * changes which violated constraints specific to instantiated graph update. + * + * \param hGraphExec The instantiated graph to be updated + * \param hGraph The graph containing the updated parameters + * \param resultInfo The error info structure + * + * \return + * ::cudaSuccess, + * ::cudaErrorGraphExecUpdateFailure, + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphExecUpdateResultInfo *resultInfo); + +/** + * \brief Uploads an executable graph in a stream + * + * Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of + * the same \p hGraphExec will be serialized. Each upload is ordered behind both any + * previous work in \p hStream and any previous launches of \p hGraphExec. + * Uses memory cached by \p stream to back the allocations owned by \p graphExec. + * + * \param hGraphExec - Executable graph to upload + * \param hStream - Stream in which to upload the graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * \notefnerr + * \note_init_rt + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +#if __CUDART_API_VERSION >= 11010 + extern __host__ cudaError_t CUDARTAPI cudaGraphUpload(cudaGraphExec_t graphExec, cudaStream_t stream); +#endif + +/** + * \brief Launches an executable graph in a stream + * + * Executes \p graphExec in \p stream. Only one instance of \p graphExec may be executing + * at a time. Each launch is ordered behind both any previous work in \p stream + * and any previous launches of \p graphExec.
To execute a graph concurrently, it must be + * instantiated multiple times into multiple executable graphs. + * + * If any allocations created by \p graphExec remain unfreed (from a previous launch) and + * \p graphExec was not instantiated with ::cudaGraphInstantiateFlagAutoFreeOnLaunch, + * the launch will fail with ::cudaErrorInvalidValue. + * + * \param graphExec - Executable graph to launch + * \param stream - Stream in which to launch the graph + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphUpload, + * ::cudaGraphExecDestroy + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream); + +/** + * \brief Destroys an executable graph + * + * Destroys the executable graph specified by \p graphExec. + * + * \param graphExec - Executable graph to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphInstantiate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphExecDestroy(cudaGraphExec_t graphExec); + +/** + * \brief Destroys a graph + * + * Destroys the graph specified by \p graph, as well as all of its nodes. + * + * \param graph - Graph to destroy + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * \note_destroy_ub + * + * \sa + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphDestroy(cudaGraph_t graph); + +/** + * \brief Write a DOT file describing graph structure + * + * Using the provided \p graph, write to \p path a DOT formatted description of the graph. + * By default this includes the graph topology, node types, node id, kernel names and memcpy direction. + * \p flags can be specified to write more detailed information about each node type such as + * parameter values, kernel attributes, node and function handles. + * + * \param graph - The graph to create a DOT file from + * \param path - The path to write the DOT file to + * \param flags - Flags from cudaGraphDebugDotFlags for specifying which additional node information to write + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorOperatingSystem + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphDebugDotPrint(cudaGraph_t graph, const char *path, unsigned int flags); + +/** + * \brief Create a user object + * + * Create a user object with the specified destructor callback and initial reference count. The + * initial references are owned by the caller. + * + * Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they + * are executed by a shared internal thread. Another thread may be signaled to perform such + * actions, if it does not block forward progress of tasks scheduled through CUDA. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object_out - Location to return the user object handle + * \param ptr - The pointer to pass to the destroy function + * \param destroy - Callback to free the user object when it is no longer in use + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. 
+ * \param flags - Currently it is required to pass ::cudaUserObjectNoDestructorSync, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectRetain, + * ::cudaUserObjectRelease, + * ::cudaGraphRetainUserObject, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaUserObjectCreate(cudaUserObject_t *object_out, void *ptr, cudaHostFn_t destroy, unsigned int initialRefcount, unsigned int flags); + +/** + * \brief Retain a reference to a user object + * + * Retains new references to a user object. The new references are owned by the caller. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to retain + * \param count - The number of references to retain, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate, + * ::cudaUserObjectRelease, + * ::cudaGraphRetainUserObject, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaUserObjectRetain(cudaUserObject_t object, unsigned int count __dv(1)); + +/** + * \brief Release a reference to a user object + * + * Releases user object references owned by the caller. The object's destructor is invoked if + * the reference count reaches zero. + * + * It is undefined behavior to release references not owned by the caller, or to use a user + * object handle after all references are released. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to release + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate, + * ::cudaUserObjectRetain, + * ::cudaGraphRetainUserObject, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaUserObjectRelease(cudaUserObject_t object, unsigned int count __dv(1)); + +/** + * \brief Retain a reference to a user object from a graph + * + * Creates or moves user object references that will be owned by a CUDA graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph to associate the reference with + * \param object - The user object to retain a reference for + * \param count - The number of references to add to the graph, typically 1. Must be + * nonzero and not larger than INT_MAX. + * \param flags - The optional flag ::cudaGraphUserObjectMove transfers references + * from the calling thread, rather than create new references. Pass 0 + * to create new references. 
+ * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + * ::cudaUserObjectRetain, + * ::cudaUserObjectRelease, + * ::cudaGraphReleaseUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphRetainUserObject(cudaGraph_t graph, cudaUserObject_t object, unsigned int count __dv(1), unsigned int flags __dv(0)); + +/** + * \brief Release a user object reference from a graph + * + * Releases user object references owned by a graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph that will release the reference + * \param object - The user object to release a reference for + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + * ::cudaUserObjectRetain, + * ::cudaUserObjectRelease, + * ::cudaGraphRetainUserObject, + * ::cudaGraphCreate + */ +extern __host__ cudaError_t CUDARTAPI cudaGraphReleaseUserObject(cudaGraph_t graph, cudaUserObject_t object, unsigned int count __dv(1)); + +/** @} */ /* END CUDART_GRAPH */ + +/** + * \defgroup CUDART_DRIVER_ENTRY_POINT Driver Entry Point Access + * + * ___MANBRIEF___ driver entry point access functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the driver entry point access functions of the CUDA + * runtime application programming interface. + * + * @{ + */ + +/** + * \brief Returns the requested driver API function pointer + * + * Returns in \p **funcPtr the address of the CUDA driver function for the requested flags. + * + * For a requested driver symbol, if the CUDA version in which the driver symbol was + * introduced is less than or equal to the CUDA runtime version, the API will return + * the function pointer to the corresponding versioned driver function. + * + * The pointer returned by the API should be cast to a function pointer matching the + * requested driver function's definition in the API header file. The function pointer + * typedef can be picked up from the corresponding typedefs header file. For example, + * cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h. + * + * The API will return ::cudaSuccess and set the returned \p funcPtr to NULL if the + * requested driver function is not supported on the platform, no ABI + * compatible driver function exists for the CUDA runtime version or if the + * driver symbol is invalid. + * + * It will also set the optional \p driverStatus to one of the values in + * ::cudaDriverEntryPointQueryResult with the following meanings: + * - ::cudaDriverEntryPointSuccess - The requested symbol was successfully found based + * on input arguments and \p pfn is valid + * - ::cudaDriverEntryPointSymbolNotFound - The requested symbol was not found + * - ::cudaDriverEntryPointVersionNotSufficent - The requested symbol was found but is + * not supported by the current runtime version (CUDART_VERSION) + * + * The requested flags can be: + * - ::cudaEnableDefault: This is the default mode. This is equivalent to + * ::cudaEnablePerThreadDefaultStream if the code is compiled with + * --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM + * is defined; ::cudaEnableLegacyStream otherwise.
+ * - ::cudaEnableLegacyStream: This will enable the search for all driver symbols + * that match the requested driver symbol name except the corresponding per-thread versions. + * - ::cudaEnablePerThreadDefaultStream: This will enable the search for all + * driver symbols that match the requested driver symbol name including the per-thread + * versions. If a per-thread version is not found, the API will return the legacy version + * of the driver function. + * + * \param symbol - The base name of the driver API function to look for. As an example, + * for the driver API ::cuMemAlloc_v2, \p symbol would be cuMemAlloc. + * Note that the API will use the CUDA runtime version to return the + * address to the most recent ABI compatible driver symbol, ::cuMemAlloc + * or ::cuMemAlloc_v2. + * \param funcPtr - Location to return the function pointer to the requested driver function + * \param flags - Flags to specify search options. + * \param driverStatus - Optional location to store the status of finding the symbol from + * the driver. See ::cudaDriverEntryPointQueryResult for + * possible values. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorNotSupported + * \note_version_mixing + * \note_init_rt + * \note_callback + * + * \sa + * ::cuGetProcAddress + */ +#if defined(__cplusplus) +extern __host__ cudaError_t CUDARTAPI cudaGetDriverEntryPoint(const char *symbol, void **funcPtr, unsigned long long flags, enum cudaDriverEntryPointQueryResult *driverStatus = NULL); +#else +extern __host__ cudaError_t CUDARTAPI cudaGetDriverEntryPoint(const char *symbol, void **funcPtr, unsigned long long flags, enum cudaDriverEntryPointQueryResult *driverStatus); +#endif + +/** @} */ /* END CUDART_DRIVER_ENTRY_POINT */ + +/** \cond impl_private */ +extern __host__ cudaError_t CUDARTAPI cudaGetExportTable(const void **ppExportTable, const cudaUUID_t *pExportTableId); +/** \endcond impl_private */ + +/** + * \defgroup CUDART_HIGHLEVEL C++ API Routines + * + * ___MANBRIEF___ C++ high level API functions of the CUDA runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the C++ high level API functions of the CUDA runtime + * application programming interface. To use these functions, your + * application needs to be compiled with the \p nvcc compiler. + * + * \brief C++-style interface built on top of CUDA runtime API + */ + +/** + * \defgroup CUDART_DRIVER Interactions with the CUDA Driver API + * + * ___MANBRIEF___ interactions between CUDA Driver API and CUDA Runtime API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the interactions between the CUDA Driver API and the CUDA Runtime API. + * + * @{ + * + * \section CUDART_CUDA_primary Primary Contexts + * + * There exists a one to one relationship between CUDA devices in the CUDA Runtime + * API and ::CUcontext s in the CUDA Driver API within a process. The specific + * context which the CUDA Runtime API uses for a device is called the device's + * primary context. From the perspective of the CUDA Runtime API, a device and + * its primary context are synonymous. + * + * \section CUDART_CUDA_init Initialization and Tear-Down + * + * CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current + * to the calling host thread. + * + * The function ::cudaInitDevice() ensures that the primary context is initialized + * for the requested device but does not make it current to the calling thread.
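+ *
+ * For illustration, a minimal sketch of initializing device 0's primary context ahead of time
+ * (device 0 is assumed to exist; default flags are used):
+ * \code
+ cudaInitDevice(0, 0, 0);   // initialize the primary context without making it current
+ cudaSetDevice(0);          // later, make device 0 and its primary context current to this thread
+ * \endcode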
+ * + * The function ::cudaSetDevice() initializes the primary context for the + * specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent(). + * + * The CUDA Runtime API will automatically initialize the primary context for + * a device at the first CUDA Runtime API call which requires an active context. + * If no ::CUcontext is current to the calling thread when a CUDA Runtime API call + * which requires an active context is made, then the primary context for a device + * will be selected, made current to the calling thread, and initialized. + * + * The context which the CUDA Runtime API initializes will be initialized using + * the parameters specified by the CUDA Runtime API functions + * ::cudaSetDeviceFlags(), + * ::cudaD3D9SetDirect3DDevice(), + * ::cudaD3D10SetDirect3DDevice(), + * ::cudaD3D11SetDirect3DDevice(), + * ::cudaGLSetGLDevice(), and + * ::cudaVDPAUSetVDPAUDevice(). + * Note that these functions will fail with ::cudaErrorSetOnActiveProcess if they are + * called when the primary context for the specified device has already been initialized. + * (or if the current device has already been initialized, in the case of + * ::cudaSetDeviceFlags()). + * + * Primary contexts will remain active until they are explicitly deinitialized + * using ::cudaDeviceReset(). The function ::cudaDeviceReset() will deinitialize the + * primary context for the calling thread's current device immediately. The context + * will remain current to all of the threads that it was current to. The next CUDA + * Runtime API call on any thread which requires an active context will trigger the + * reinitialization of that device's primary context. + * + * Note that primary contexts are shared resources. It is recommended that + * the primary context not be reset except just before exit or to recover from an + * unspecified launch failure. + * + * \section CUDART_CUDA_context Context Interoperability + * + * Note that the use of multiple ::CUcontext s per device within a single process + * will substantially degrade performance and is strongly discouraged. Instead, + * it is highly recommended that the implicit one-to-one device-to-context mapping + * for the process provided by the CUDA Runtime API be used. + * + * If a non-primary ::CUcontext created by the CUDA Driver API is current to a + * thread then the CUDA Runtime API calls to that thread will operate on that + * ::CUcontext, with some exceptions listed below. Interoperability between data + * types is discussed in the following sections. + * + * The function ::cudaPointerGetAttributes() will return the error + * ::cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a + * non-primary context. The function ::cudaDeviceEnablePeerAccess() and the rest of + * the peer access API may not be called when a non-primary ::CUcontext is current. + * To use the pointer query and peer access APIs with a context created using the + * CUDA Driver API, it is necessary that the CUDA Driver API be used to access + * these features. + * + * All CUDA Runtime API state (e.g, global variables' addresses and values) travels + * with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one + * thread to another then all CUDA Runtime API state will move to that thread as well. + * + * Please note that attaching to legacy contexts (those with a version of 3010 as returned + * by ::cuCtxGetApiVersion()) is not possible. 
The CUDA Runtime will return + * ::cudaErrorIncompatibleDriverContext in such cases. + * + * \section CUDART_CUDA_stream Interactions between CUstream and cudaStream_t + * + * The types ::CUstream and ::cudaStream_t are identical and may be used interchangeably. + * + * \section CUDART_CUDA_event Interactions between CUevent and cudaEvent_t + * + * The types ::CUevent and ::cudaEvent_t are identical and may be used interchangeably. + * + * \section CUDART_CUDA_array Interactions between CUarray and cudaArray_t + * + * The types ::CUarray and struct ::cudaArray * represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, + * it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *. + * + * In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, + * it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray . + * + * \section CUDART_CUDA_graphicsResource Interactions between CUgraphicsResource and cudaGraphicsResource_t + * + * The types ::CUgraphicsResource and ::cudaGraphicsResource_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a + * ::cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource + * to a ::cudaGraphicsResource_t. + * + * In order to use a ::cudaGraphicsResource_t in a CUDA Driver API function which takes a + * ::CUgraphicsResource, it is necessary to explicitly cast the ::cudaGraphicsResource_t + * to a ::CUgraphicsResource. + * + * \section CUDART_CUDA_texture_objects Interactions between CUtexObject and cudaTextureObject_t + * + * The types ::CUtexObject and ::cudaTextureObject_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUtexObject in a CUDA Runtime API function which takes a ::cudaTextureObject_t, + * it is necessary to explicitly cast the ::CUtexObject to a ::cudaTextureObject_t. + * + * In order to use a ::cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, + * it is necessary to explicitly cast the ::cudaTextureObject_t to a ::CUtexObject. + * + * \section CUDART_CUDA_surface_objects Interactions between CUsurfObject and cudaSurfaceObject_t + * + * The types ::CUsurfObject and ::cudaSurfaceObject_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a ::cudaSurfaceObject_t, + * it is necessary to explicitly cast the ::CUsurfObject to a ::cudaSurfaceObject_t. + * + * In order to use a ::cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, + * it is necessary to explicitly cast the ::cudaSurfaceObject_t to a ::CUsurfObject. + * + * \section CUDART_CUDA_module Interactions between CUfunction and cudaFunction_t + * + * The types ::CUfunction and ::cudaFunction_t represent the same data type and may be used + * interchangeably by casting the two types between each other. + * + * In order to use a ::cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, + * it is necessary to explicitly cast the ::cudaFunction_t to a ::CUfunction. 
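+ *
+ * For illustration, a minimal sketch of the explicit casts described above; \c cuArr is assumed
+ * to be a ::CUarray obtained through the CUDA Driver API (cuda.h included) and \c rtFunc a
+ * ::cudaFunction_t obtained through the CUDA Runtime API:
+ * \code
+ cudaArray_t rtArr  = (cudaArray_t)cuArr;    // use a CUarray where a struct cudaArray * is expected
+ CUfunction  cuFunc = (CUfunction)rtFunc;    // use a cudaFunction_t where a CUfunction is expected
+ * \endcode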
+ * + */ + + /** + * \brief Get pointer to device entry function that matches entry function \p symbolPtr + * + * Returns in \p functionPtr the device entry function corresponding to the symbol \p symbolPtr. + * + * \param functionPtr - Returns the device entry function + * \param symbolPtr - Pointer to device entry function to search for + * + * \return + * ::cudaSuccess + * + */ +extern __host__ cudaError_t cudaGetFuncBySymbol(cudaFunction_t* functionPtr, const void* symbolPtr); + +/** + * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr + * + * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr. + * + * \param kernelPtr - Returns the device kernel + * \param entryFuncAddr - Address of device entry function to search kernel for + * + * \return + * ::cudaSuccess + * + * \sa + * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const T *entryFuncAddr) "cudaGetKernel (C++ API)" + */ +extern __host__ cudaError_t CUDARTAPI cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr); + +/** @} */ /* END CUDART_DRIVER */ + +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cudaMemcpy + #undef cudaMemcpyToSymbol + #undef cudaMemcpyFromSymbol + #undef cudaMemcpy2D + #undef cudaMemcpyToArray + #undef cudaMemcpy2DToArray + #undef cudaMemcpyFromArray + #undef cudaMemcpy2DFromArray + #undef cudaMemcpyArrayToArray + #undef cudaMemcpy2DArrayToArray + #undef cudaMemcpy3D + #undef cudaMemcpy3DPeer + #undef cudaMemset + #undef cudaMemset2D + #undef cudaMemset3D + #undef cudaMemcpyAsync + #undef cudaMemcpyToSymbolAsync + #undef cudaMemcpyFromSymbolAsync + #undef cudaMemcpy2DAsync + #undef cudaMemcpyToArrayAsync + #undef cudaMemcpy2DToArrayAsync + #undef cudaMemcpyFromArrayAsync + #undef cudaMemcpy2DFromArrayAsync + #undef cudaMemcpy3DAsync + #undef cudaMemcpy3DPeerAsync + #undef cudaMemsetAsync + #undef cudaMemset2DAsync + #undef cudaMemset3DAsync + #undef cudaStreamQuery + #undef cudaStreamGetFlags + #undef cudaStreamGetId + #undef cudaStreamGetPriority + #undef cudaEventRecord + #undef cudaEventRecordWithFlags + #undef cudaStreamWaitEvent + #undef cudaStreamAddCallback + #undef cudaStreamAttachMemAsync + #undef cudaStreamSynchronize + #undef cudaLaunchKernel + #undef cudaLaunchKernelExC + #undef cudaLaunchHostFunc + #undef cudaMemPrefetchAsync + #undef cudaLaunchCooperativeKernel + #undef cudaSignalExternalSemaphoresAsync + #undef cudaWaitExternalSemaphoresAsync + #undef cudaGraphInstantiateWithParams + #undef cudaGraphUpload + #undef cudaGraphLaunch + #undef cudaStreamBeginCapture + #undef cudaStreamEndCapture + #undef cudaStreamIsCapturing + #undef cudaStreamGetCaptureInfo + #undef cudaStreamGetCaptureInfo_v2 + #undef cudaStreamCopyAttributes + #undef cudaStreamGetAttribute + #undef cudaStreamSetAttribute + #undef cudaMallocAsync + #undef cudaFreeAsync + #undef cudaMallocFromPoolAsync + #undef cudaGetDriverEntryPoint + + #undef cudaGetDeviceProperties + + extern __host__ cudaError_t CUDARTAPI cudaMemcpy(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbol(const void *symbol, const void *src, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyHostToDevice)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbol(void *dst, const void *symbol, size_t count, size_t offset __dv(0), enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToHost)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2D(void *dst, size_t dpitch, const void 
*src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArray(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromArray(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArray(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DArrayToArray(cudaArray_t dst, size_t wOffsetDst, size_t hOffsetDst, cudaArray_const_t src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, enum cudaMemcpyKind kind __dv(cudaMemcpyDeviceToDevice)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy3D(const struct cudaMemcpy3DParms *p); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeer(const struct cudaMemcpy3DPeerParms *p); + extern __host__ cudaError_t CUDARTAPI cudaMemset(void *devPtr, int value, size_t count); + extern __host__ cudaError_t CUDARTAPI cudaMemset2D(void *devPtr, size_t pitch, int value, size_t width, size_t height); + extern __host__ cudaError_t CUDARTAPI cudaMemset3D(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToSymbolAsync(const void *symbol, const void *src, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromSymbolAsync(void *dst, const void *symbol, size_t count, size_t offset, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DToArrayAsync(cudaArray_t dst, size_t wOffset, size_t hOffset, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpyFromArrayAsync(void *dst, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy2DFromArrayAsync(void *dst, size_t dpitch, cudaArray_const_t src, size_t wOffset, size_t hOffset, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI 
cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaMemcpy3DPeerAsync(const struct cudaMemcpy3DPeerParms *p, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaStreamQuery(cudaStream_t stream); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetFlags(cudaStream_t hStream, unsigned int *flags); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetId(cudaStream_t hStream, unsigned long long *streamId); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamGetPriority(cudaStream_t hStream, int *priority); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream __dv(0), unsigned int flags __dv(0)); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags); + extern __host__ cudaError_t CUDARTAPI cudaStreamAddCallback(cudaStream_t stream, cudaStreamCallback_t callback, void *userData, unsigned int flags); + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamAttachMemAsync(cudaStream_t stream, void *devPtr, size_t length, unsigned int flags); + extern __host__ cudaError_t CUDARTAPI cudaStreamSynchronize(cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args); + extern __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaLaunchHostFunc(cudaStream_t stream, cudaHostFn_t fn, void *userData); + extern __host__ cudaError_t CUDARTAPI cudaMemPrefetchAsync(const void *devPtr, size_t count, int dstDevice, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync_ptsz(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaSignalExternalSemaphoresAsync_v2(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreSignalParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams_v1 
*paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync_ptsz(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaWaitExternalSemaphoresAsync_v2(const cudaExternalSemaphore_t *extSemArray, const struct cudaExternalSemaphoreWaitParams *paramsArray, unsigned int numExtSems, cudaStream_t stream __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaGraphInstantiateWithParams(cudaGraphExec_t *pGraphExec, cudaGraph_t graph, cudaGraphInstantiateParams *instantiateParams); + extern __host__ cudaError_t CUDARTAPI cudaGraphUpload(cudaGraphExec_t graphExec, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaStreamBeginCapture(cudaStream_t stream, enum cudaStreamCaptureMode mode); + extern __host__ cudaError_t CUDARTAPI cudaStreamEndCapture(cudaStream_t stream, cudaGraph_t *pGraph); + extern __host__ cudaError_t CUDARTAPI cudaStreamIsCapturing(cudaStream_t stream, enum cudaStreamCaptureStatus *pCaptureStatus); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo_ptsz(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetCaptureInfo_v2(cudaStream_t stream, enum cudaStreamCaptureStatus *captureStatus_out, unsigned long long *id_out __dv(0), cudaGraph_t *graph_out __dv(0), const cudaGraphNode_t **dependencies_out __dv(0), size_t *numDependencies_out __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaStreamUpdateCaptureDependencies_ptsz(cudaStream_t stream, cudaGraphNode_t *dependencies, size_t numDependencies, unsigned int flags __dv(0)); + extern __host__ cudaError_t CUDARTAPI cudaStreamCopyAttributes(cudaStream_t dstStream, cudaStream_t srcStream); + extern __host__ cudaError_t CUDARTAPI cudaStreamGetAttribute(cudaStream_t stream, cudaStreamAttrID attr, cudaStreamAttrValue *value); + extern __host__ cudaError_t CUDARTAPI cudaStreamSetAttribute(cudaStream_t stream, cudaStreamAttrID attr, const cudaStreamAttrValue *param); + + extern __host__ cudaError_t CUDARTAPI cudaMallocAsync(void **devPtr, size_t size, cudaStream_t hStream); + extern __host__ cudaError_t CUDARTAPI cudaFreeAsync(void *devPtr, cudaStream_t hStream); + extern __host__ cudaError_t CUDARTAPI cudaMallocFromPoolAsync(void **ptr, size_t size, cudaMemPool_t memPool, cudaStream_t stream); + extern __host__ cudaError_t CUDARTAPI cudaGetDriverEntryPoint(const char *symbol, void **funcPtr, unsigned long long flags, enum cudaDriverEntryPointQueryResult *driverStatus); + + extern __host__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceProperties(struct cudaDeviceProp *prop, int device); + +#elif defined(__CUDART_API_PER_THREAD_DEFAULT_STREAM) + // nvcc stubs reference the 'cudaLaunch'/'cudaLaunchKernel' identifier even if it was defined + // to 'cudaLaunch_ptsz'/'cudaLaunchKernel_ptsz'. Redirect through a static inline function. 
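  /* Illustrative note, not part of the vendored header: this branch applies when
   * the per-thread default stream is enabled, typically by compiling with
   * "nvcc --default-stream per-thread" (which in effect defines
   * CUDA_API_PER_THREAD_DEFAULT_STREAM before this header is processed). The
   * runtime entry points are then remapped to their _ptsz/_ptds variants, and
   * the static inline wrappers below keep the unsuffixed names usable, e.g.
   *
   *     cudaLaunchKernel(kernelFunc, grid, block, args, 0, 0);  // resolves to the _ptsz variant
   *
   * (kernelFunc/grid/block/args are placeholders for the caller's launch arguments.)
   */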
+ #undef cudaLaunchKernel + static __inline__ __host__ cudaError_t cudaLaunchKernel(const void *func, + dim3 gridDim, dim3 blockDim, + void **args, size_t sharedMem, + cudaStream_t stream) + { + return cudaLaunchKernel_ptsz(func, gridDim, blockDim, args, sharedMem, stream); + } + #define cudaLaunchKernel __CUDART_API_PTSZ(cudaLaunchKernel) + #undef cudaLaunchKernelExC + static __inline__ __host__ cudaError_t cudaLaunchKernelExC(const cudaLaunchConfig_t *config, + const void *func, + void **args) + { + return cudaLaunchKernelExC_ptsz(config, func, args); + } + #define cudaLaunchKernelExC __CUDART_API_PTSZ(cudaLaunchKernelExC) +#endif + +#if defined(__cplusplus) +} + +#endif /* __cplusplus */ + +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ + +#undef __dv +#undef __CUDA_DEPRECATED + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_API_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_API_H__ +#endif + +#endif /* !__CUDA_RUNTIME_API_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4a35c215668e98006c3eaa286deb70461eb1fa62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h @@ -0,0 +1,76 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_SURFACE_TYPES_H__) +#define __CUDA_SURFACE_TYPES_H__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__CUDA_SURFACE_TYPES_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4f723db5c682a7b4b05491219df8993f0f6ebd59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h @@ -0,0 +1,76 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_TEXTURE_TYPES_H__) +#define __CUDA_TEXTURE_TYPES_H__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__CUDA_TEXTURE_TYPES_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..0f022bbe349eba2219a6b74f1ea315c1ce8551b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h @@ -0,0 +1,57 @@ +/* + * Copyright 2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef __CUDART_PLATFORM_H__ +#define __CUDART_PLATFORM_H__ + +#if ((defined(__linux__) || defined(__QNX__)) && (defined(__arm__) || defined(__aarch64__) || defined(__x86_64__))) +#define isEglSupported 1 +#endif + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..611b715edfb82b6ee731419c30505676e1f07c52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h @@ -0,0 +1,217 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DEVICE_ATOMIC_FUNCTIONS_H__) +#define __DEVICE_ATOMIC_FUNCTIONS_H__ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +# define __DEVICE_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__ +#else /* __CUDACC_RTC__ */ +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in PGI CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. 
*/ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +extern "C" +{ +extern __device__ __device_builtin__ int __iAtomicAdd(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicAdd(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicExch(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicExch(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ float __fAtomicExch(float *address, float val); +extern __device__ __device_builtin__ int __iAtomicMin(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicMin(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicMax(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicMax(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ unsigned int __uAtomicInc(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ unsigned int __uAtomicDec(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicAnd(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicAnd(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicOr(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicOr(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicXor(int *address, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicXor(unsigned int *address, unsigned int val); +extern __device__ __device_builtin__ int __iAtomicCAS(int *address, int compare, int val); +extern __device__ __device_builtin__ unsigned int __uAtomicCAS(unsigned int *address, unsigned int compare, unsigned int val); +} +#endif /* __CUDA_ARCH__ || defined(_NVHPC_CUDA) */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int 
atomicDec(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val) __DEF_IF_HOST + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on compute_70 and above, and should be replaced with "#x"_sync()."\ + "To continue using "#x"(), specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options: -arch=compute_60 -code=sm_70." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." +#else +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." 
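/* Illustrative sketch, not part of the vendored header: the migration that
 * __WSB_DEPRECATION_MESSAGE above points to, assuming CUDA 9+ and all 32 lanes
 * of the warp participating (full mask).
 *
 *     __global__ void vote_example(const int *in, int *out)
 *     {
 *         int pred = in[threadIdx.x] > 0;
 *         // deprecated on compute_70 and above: __any(pred), __all(pred)
 *         out[0] = __any_sync(0xFFFFFFFFu, pred);  // nonzero if any participating lane's pred is nonzero
 *         out[1] = __all_sync(0xFFFFFFFFu, pred);  // nonzero if every participating lane's pred is nonzero
 *     }
 */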
+#endif + +extern "C" +{ +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +extern __device__ __device_builtin__ unsigned long long int __ullAtomicAdd(unsigned long long int *address, unsigned long long int val); +extern __device__ __device_builtin__ unsigned long long int __ullAtomicExch(unsigned long long int *address, unsigned long long int val); +extern __device__ __device_builtin__ unsigned long long int __ullAtomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val); +#endif /* __CUDA_ARCH__ || _NVHPC_CUDA */ +extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) int __any(int cond); +extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) int __all(int cond); +} + + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) bool any(bool cond) __DEF_IF_HOST + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) bool all(bool cond) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __DEVICE_ATOMIC_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "device_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__DEVICE_ATOMIC_FUNCTIONS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..50e427c39b164e6e145c3930cd66cc03806175a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp @@ -0,0 +1,224 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DEVICE_ATOMIC_FUNCTIONS_HPP__) +#define __DEVICE_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val) +{ + return __iAtomicAdd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val) +{ + return __iAtomicAdd(address, (unsigned int)-(int)val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd(address, (unsigned int)-(int)val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val) +{ + return __iAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val) +{ + return __uAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val) +{ + return __fAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val) +{ + return __iAtomicMin(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val) +{ + return __uAtomicMin(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, 
int val) +{ + return __iAtomicMax(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val) +{ + return __uAtomicMax(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val) +{ + return __uAtomicInc(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val) +{ + return __uAtomicDec(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val) +{ + return __iAtomicAnd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val) +{ + return __iAtomicOr(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val) +{ + return __uAtomicOr(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val) +{ + return __iAtomicXor(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val) +{ + return __uAtomicXor(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val) +{ + return __iAtomicCAS(address, compare, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val) +{ + return __uAtomicCAS(address, compare, val); +} + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) +{ + return __ullAtomicAdd(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val) +{ + return __ullAtomicExch(address, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val) +{ + return __ullAtomicCAS(address, compare, val); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ bool any(bool cond) +{ + return (bool)__any((int)cond); +} + +__DEVICE_ATOMIC_FUNCTIONS_DECL__ bool all(bool cond) +{ + return (bool)__all((int)cond); +} + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEVICE_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__DEVICE_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0094cc9a0a57f53f47421a8ecc400fb84c26babe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
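/* Illustrative sketch, not part of the vendored header: per the diagnostic
 * above, application code should include the public runtime header rather than
 * this internal wrapper, e.g.
 *
 *     #include <cuda_runtime.h>   // public header; pulls in the crt/ internals itself
 *
 *     __global__ void scale(float *x) { x[threadIdx.x] *= 2.0f; }
 *
 * Including device_functions.h directly only emits the warning and then
 * forwards to crt/device_functions.h.
 */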
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__ +#endif + +#include "crt/device_functions.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__ +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h new file mode 100644 index 0000000000000000000000000000000000000000..8f552db8faab7d21e90e06a1ea2184a5563d3bf2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h @@ -0,0 +1,118 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__DEVICE_LAUNCH_PARAMETERS_H__) +#define __DEVICE_LAUNCH_PARAMETERS_H__ + +#include "vector_types.h" + +#if !defined(__STORAGE__) + +#if defined(__CUDACC_RTC__) +#define __STORAGE__ \ + extern const __device__ +#else /* !__CUDACC_RTC__ */ +#define __STORAGE__ \ + extern const +#endif /* __CUDACC_RTC__ */ + +#endif /* __STORAGE__ */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +uint3 __device_builtin__ __STORAGE__ threadIdx; +uint3 __device_builtin__ __STORAGE__ blockIdx; +dim3 __device_builtin__ __STORAGE__ blockDim; +dim3 __device_builtin__ __STORAGE__ gridDim; +int __device_builtin__ __STORAGE__ warpSize; + +#undef __STORAGE__ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#if !defined(__cudaGet_threadIdx) + +#define __cudaGet_threadIdx() \ + threadIdx + +#endif /* __cudaGet_threadIdx */ + +#if !defined(__cudaGet_blockIdx) + +#define __cudaGet_blockIdx() \ + blockIdx + +#endif /* __cudaGet_blockIdx */ + +#if !defined(__cudaGet_blockDim) + +#define __cudaGet_blockDim() \ + blockDim + +#endif /* __cudaGet_blockDim */ + +#if !defined(__cudaGet_gridDim) + +#define __cudaGet_gridDim() \ + gridDim + +#endif /* __cudaGet_gridDim */ + +#if !defined(__cudaGet_warpSize) + +#define __cudaGet_warpSize() \ + warpSize + +#endif /* __cudaGet_warpSize */ + +#endif /* !__DEVICE_LAUNCH_PARAMETERS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..94767974220594550d496cad4d14c45349b27737 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h @@ -0,0 +1,145 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. 
Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DRIVER_FUNCTIONS_H__) +#define __DRIVER_FUNCTIONS_H__ + +#include "builtin_types.h" +#include "crt/host_defines.h" +#include "driver_types.h" + +/** + * \addtogroup CUDART_MEMORY + * + * @{ + */ + +/** + * \brief Returns a cudaPitchedPtr based on input parameters + * + * Returns a ::cudaPitchedPtr based on the specified input parameters \p d, + * \p p, \p xsz, and \p ysz. + * + * \param d - Pointer to allocated memory + * \param p - Pitch of allocated memory in bytes + * \param xsz - Logical width of allocation in elements + * \param ysz - Logical height of allocation in elements + * + * \return + * ::cudaPitchedPtr specified by \p d, \p p, \p xsz, and \p ysz + * + * \sa make_cudaExtent, make_cudaPos + */ +static __inline__ __host__ struct cudaPitchedPtr make_cudaPitchedPtr(void *d, size_t p, size_t xsz, size_t ysz) +{ + struct cudaPitchedPtr s; + + s.ptr = d; + s.pitch = p; + s.xsize = xsz; + s.ysize = ysz; + + return s; +} + +/** + * \brief Returns a cudaPos based on input parameters + * + * Returns a ::cudaPos based on the specified input parameters \p x, + * \p y, and \p z. + * + * \param x - X position + * \param y - Y position + * \param z - Z position + * + * \return + * ::cudaPos specified by \p x, \p y, and \p z + * + * \sa make_cudaExtent, make_cudaPitchedPtr + */ +static __inline__ __host__ struct cudaPos make_cudaPos(size_t x, size_t y, size_t z) +{ + struct cudaPos p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + +/** + * \brief Returns a cudaExtent based on input parameters + * + * Returns a ::cudaExtent based on the specified input parameters \p w, + * \p h, and \p d. + * + * \param w - Width in elements when referring to array memory, in bytes when referring to linear memory + * \param h - Height in elements + * \param d - Depth in elements + * + * \return + * ::cudaExtent specified by \p w, \p h, and \p d + * + * \sa make_cudaPitchedPtr, make_cudaPos + */ +static __inline__ __host__ struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d) +{ + struct cudaExtent e; + + e.width = w; + e.height = h; + e.depth = d; + + return e; +} + +/** @} */ /* END CUDART_MEMORY */ + +#endif /* !__DRIVER_FUNCTIONS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..98a9c98a957e8f60e872b94fde762516c5523367 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
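/* Illustrative note, not part of the vendored header: the diagnostic above is
 * gated on __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__, so it is suppressed for
 * includes that define that macro first, which is what the toolkit's own
 * wrapper headers do, e.g.
 *
 *     #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
 *     #include "host_defines.h"    // forwards to crt/host_defines.h without the warning
 *     #undef  __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
 *
 * Ordinary application code should simply include <cuda_runtime.h> instead.
 */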
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__ +#endif + +#include "crt/host_defines.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__ +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..bc806976784e494edc905d8b8bd9ad138054bbea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("math_functions.h is an internal header file and must not be used directly. 
This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "math_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__ +#endif + +#include "crt/math_functions.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__ +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h new file mode 100644 index 0000000000000000000000000000000000000000..9f36f671c0b3a4e95cbb7bddbe41e75ac783b722 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h @@ -0,0 +1,60 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__ +#endif + +#include "crt/mma.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__ +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..b852394138c6d88f80a317c1e4930bc7506e8b74 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.h @@ -0,0 +1,114 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
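The host_defines.h, math_functions.h and mma.h wrappers added above only forward to their crt/ counterparts and warn when included directly; the supported entry points are cuda_runtime.h or cuda_runtime_api.h, as the warning text itself says. A minimal sketch of the intended usage, with a hypothetical kernel that is not part of this diff (compile with nvcc -c):

// Include the public umbrella header instead of the internal wrappers above.
#include <cuda_runtime.h>

__global__ void scaled_sqrt(const float *in, float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // sqrtf reaches device code through cuda_runtime.h -> crt/math_functions.h
        out[i] = sqrtf(in[i]);
    }
}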
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_20_ATOMIC_FUNCTIONS_H__) +#define __SM_20_ATOMIC_FUNCTIONS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ __location__(device) __location__(host) +#endif /* _NVHPC_CUDA */ + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +extern "C" +{ +extern __device__ __device_builtin__ float __fAtomicAdd(float *address, float val); +} +#endif /* __CUDA_ARCH__ */ + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ +#endif /* _NVHPC_CUDA */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_20_ATOMIC_FUNCTIONS_DECL__ float atomicAdd(float *address, float val) __DEF_IF_HOST + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_20_ATOMIC_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) && (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)) +#include "sm_20_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) */ + +#endif /* !__SM_20_ATOMIC_FUNCTIONS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ac4aa9bfc6b8d5d4d240e05a2fd557889f30c47f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp @@ -0,0 +1,85 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_20_ATOMIC_FUNCTIONS_HPP__) +#define __SM_20_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_20_ATOMIC_FUNCTIONS_DECL__ float atomicAdd(float *address, float val) +{ + return __fAtomicAdd(address, val); +} + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_20_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_20_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..864e78da0ed58d630e670c40b20e968764b24752 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.h @@ -0,0 +1,1563 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
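sm_20_atomic_functions.h and its .hpp above declare the single-precision atomicAdd overload and forward it to the __fAtomicAdd builtin. A minimal usage sketch that is not part of the headers; the kernel name, sizes and zero-filled input are illustrative only:

#include <cuda_runtime.h>
#include <cstdio>

// Each thread atomically accumulates one element into a single float total.
__global__ void sum_kernel(const float *data, float *total, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicAdd(total, data[i]);   // float overload from sm_20_atomic_functions.h
    }
}

int main()
{
    const int n = 1 << 10;
    float *d_data, *d_total;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMalloc(&d_total, sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));   // all zeros, so the expected sum is 0
    cudaMemset(d_total, 0, sizeof(float));
    sum_kernel<<<(n + 255) / 256, 256>>>(d_data, d_total, n);
    float h_total = 0.0f;
    cudaMemcpy(&h_total, d_total, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_total);
    cudaFree(d_data);
    cudaFree(d_total);
    return 0;
}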
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_20_INTRINSICS_H__) +#define __SM_20_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_INTRINSICS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_INTRINSICS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ __location__(device) __location__(host) +#endif /* _NVHPC_CUDA */ + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on compute_70 and above, and should be replaced with "#x"_sync()."\ + "To continue using "#x"(), specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options: -arch=compute_60 -code=sm_70." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." +#else +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." +#endif + +extern "C" +{ +extern __device__ __device_builtin__ void __threadfence_system(void); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-to-nearest-even mode. + * + * Divides two floating-point values \p x by \p y in round-to-nearest-even mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-towards-zero mode. + * + * Divides two floating-point values \p x by \p y in round-towards-zero mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-up mode. + * + * Divides two floating-point values \p x by \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Divide two floating-point values in round-down mode. + * + * Divides two floating-point values \p x by \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x / \p y. 
+ * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __ddiv_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the reciprocal of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-towards-zero mode. + * + * Compute the reciprocal of \p x in round-towards-zero mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-up mode. + * + * Compute the reciprocal of \p x in round-up (to positive infinity) mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-down mode. + * + * Compute the reciprocal of \p x in round-down (to negative infinity) mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __drcp_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the square root of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-towards-zero mode. + * + * Compute the square root of \p x in round-towards-zero mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-up mode. + * + * Compute the square root of \p x in round-up (to positive infinity) mode. 
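The __ddiv_* and __drcp_* declarations above expose each IEEE-754 rounding direction explicitly, which is the basic building block of simple interval arithmetic. A hedged device-side fragment; the helper name is illustrative (compile with nvcc -c):

#include <cuda_runtime.h>

// Enclose the exact quotient x/y in [lo, hi] using directed rounding:
// __ddiv_rd rounds toward -infinity, __ddiv_ru toward +infinity, so the
// true quotient lies between the two results (assuming y != 0).
__device__ void ddiv_interval(double x, double y, double *lo, double *hi)
{
    *lo = __ddiv_rd(x, y);
    *hi = __ddiv_ru(x, y);
}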
+ * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-down mode. + * + * Compute the square root of \p x in round-down (to negative infinity) mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_double + * \note_requires_fermi + */ +extern __device__ __device_builtin__ double __dsqrt_rd(double x); +extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__ballot)) unsigned int __ballot(int); +extern __device__ __device_builtin__ int __syncthreads_count(int); +extern __device__ __device_builtin__ int __syncthreads_and(int); +extern __device__ __device_builtin__ int __syncthreads_or(int); +extern __device__ __device_builtin__ long long int clock64(void); + + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-to-nearest-even mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_rn(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_rn(float x, float y, float z); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-down mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_rd(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_rd(float x, float y, float z); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-up mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_ru(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_ru(float x, float y, float z); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute fused multiply-add operation in round-towards-zero mode, ignore \p -ftz=true compiler flag + * + * Behavior is the same as ::__fmaf_rz(\p x, \p y, \p z), the difference is in + * handling denormalized inputs and outputs: \p -ftz compiler flag has no effect. + */ +extern __device__ __device_builtin__ float __fmaf_ieee_rz(float x, float y, float z); + + +// SM_13 intrinsics + +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a double as a 64-bit signed integer. + * + * Reinterpret the bits in the double-precision floating-point value \p x + * as a signed 64-bit integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ long long int __double_as_longlong(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a 64-bit signed integer as a double. + * + * Reinterpret the bits in the 64-bit signed integer value \p x as + * a double-precision floating-point value. + * \return Returns reinterpreted value. 
+ */ +extern __device__ __device_builtin__ double __longlong_as_double(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-to-nearest-even mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-to-nearest-even mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rn(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-towards-zero mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-towards-zero mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. 
+ * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rz(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-up mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-up (to positive infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_ru(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-down mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-down (to negative infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. 
+ * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rd(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-to-nearest-even mode. + * + * Adds two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-towards-zero mode. + * + * Adds two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-up mode. + * + * Adds two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-down mode. + * + * Adds two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-to-nearest-even mode. + * + * Subtracts two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-towards-zero mode. + * + * Subtracts two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x - \p y. 
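Because __fma_rn rounds the whole expression x*y+z only once, it can recover the rounding error of a product exactly; this is the usual two-product step in compensated arithmetic. A hedged fragment, with an illustrative helper name:

#include <cuda_runtime.h>

// Split x*y into a rounded product p and its residual e so that
// p + e == x*y exactly (barring overflow/underflow), using the fact
// that __fma_rn(x, y, -p) is computed with a single rounding.
__device__ void two_prod(double x, double y, double *p, double *e)
{
    *p = __dmul_rn(x, y);
    *e = __fma_rn(x, y, -*p);
}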
+ * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-up mode. + * + * Subtracts two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-down mode. + * + * Subtracts two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-to-nearest-even mode. + * + * Multiplies two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-towards-zero mode. + * + * Multiplies two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-up mode. + * + * Multiplies two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-down mode. + * + * Multiplies two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-up mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-up (to positive infinity) mode. + * \return Returns converted value. 
+ */ +extern __device__ __device_builtin__ float __double2float_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-down mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-down mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-down mode. 
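The __double2float_rd/__double2float_ru conversions above give the closest single-precision values on either side of a double, which makes conservative narrowing straightforward. A hedged fragment, with an illustrative helper name:

#include <cuda_runtime.h>

// Return a float interval [lo, hi] guaranteed to contain the double x:
// rd rounds toward -infinity, ru toward +infinity.
__device__ void double_to_float_bounds(double x, float *lo, float *hi)
{
    *lo = __double2float_rd(x);
    *hi = __double2float_ru(x);
}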
+ * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed int to a double. + * + * Convert the signed integer value \p x to a double-precision floating-point value. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __int2double_rn(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned int to a double. + * + * Convert the unsigned integer value \p x to a double-precision floating-point value. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __uint2double_rn(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rn(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-towards-zero mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rz(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-up mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_ru(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-down mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. 
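For 64-bit integers with magnitude above 2^53 the conversion to double is inexact, and the __ll2double_* variants above make the rounding direction explicit. A hedged fragment for non-negative inputs, where round-toward-zero coincides with round-down; the helper name is illustrative:

#include <cuda_runtime.h>

// Bracket a non-negative 64-bit integer between two doubles.
// For x >= 0, __ll2double_rz (toward zero) equals rounding down and
// __ll2double_ru rounds up, so lo <= x <= hi.
__device__ void ll_to_double_bounds(long long int x, double *lo, double *hi)
{
    *lo = __ll2double_rz(x);
    *hi = __ll2double_ru(x);
}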
+ */ +extern __device__ __device_builtin__ double __ll2double_rd(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rn(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-towards-zero mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rz(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-up mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_ru(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-down mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rd(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high 32 bits in a double as a signed integer. + * + * Reinterpret the high 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ int __double2hiint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret low 32 bits in a double as a signed integer. + * + * Reinterpret the low 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ int __double2loint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high and low 32-bit integer values as a double. + * + * Reinterpret the integer value of \p hi as the high 32 bits of a + * double-precision floating-point value and the integer value of \p lo + * as the low 32 bits of the same double-precision floating-point value. + * \return Returns reinterpreted value. 
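The reinterpretation intrinsics above (__double_as_longlong, __double2hiint, __double2loint) view a double as raw bits rather than converting it; a typical use is picking apart the exponent or sign fields. A hedged fragment, with illustrative helper names:

#include <cuda_runtime.h>

// Extract the biased 11-bit exponent of a double from its high 32 bits.
// __double2hiint returns bits 63..32; the exponent occupies bits 62..52,
// i.e. bits 30..20 of the high word.
__device__ int biased_exponent(double x)
{
    int hi = __double2hiint(x);
    return (hi >> 20) & 0x7ff;
}

// Clearing the sign bit in the 64-bit integer view yields fabs(x)
// without any floating-point operation.
__device__ double abs_via_bits(double x)
{
    long long int bits = __double_as_longlong(x);
    return __longlong_as_double(bits & 0x7fffffffffffffffLL);
}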
+ */ +extern __device__ __device_builtin__ double __hiloint2double(int hi, int lo); + + +} + +#if defined(_NVHPC_CUDA) +#undef __device_builtin__ +#define __device_builtin__ +#endif /* _NVHPC_CUDA */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ +__SM_20_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__ballot)) unsigned int ballot(bool pred) __DEF_IF_HOST + +__SM_20_INTRINSICS_DECL__ int syncthreads_count(bool pred) __DEF_IF_HOST + +__SM_20_INTRINSICS_DECL__ bool syncthreads_and(bool pred) __DEF_IF_HOST + +__SM_20_INTRINSICS_DECL__ bool syncthreads_or(bool pred) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +__SM_20_INTRINSICS_DECL__ unsigned int __isGlobal(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ unsigned int __isShared(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ unsigned int __isConstant(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ unsigned int __isLocal(const void *ptr) __DEF_IF_HOST +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ unsigned int __isGridConstant(const void *ptr) __DEF_IF_HOST +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_global(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_shared(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_constant(const void *ptr) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_local(const void *ptr) __DEF_IF_HOST +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_grid_constant(const void *ptr) __DEF_IF_HOST +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + +__SM_20_INTRINSICS_DECL__ void * __cvta_global_to_generic(size_t rawbits) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ void * __cvta_shared_to_generic(size_t rawbits) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ void * __cvta_constant_to_generic(size_t rawbits) __DEF_IF_HOST +__SM_20_INTRINSICS_DECL__ void * __cvta_local_to_generic(size_t rawbits) __DEF_IF_HOST +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ void * __cvta_grid_constant_to_generic(size_t rawbits) __DEF_IF_HOST +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_20_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_20_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ */ +#endif /* !__SM_20_INTRINSICS_H__ && defined(__CUDA_ARCH__) */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..30c1ab99e0d66ebbceb8fe88b1122443cbf5f998 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp @@ -0,0 +1,221 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
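The block-wide vote helpers declared in sm_20_intrinsics.h (syncthreads_count/and/or, wrapping __syncthreads_count and friends) reduce a per-thread predicate across the whole block. A hedged sketch using the underlying builtin; the launch is assumed to give every thread a valid element:

#include <cuda_runtime.h>

// Count how many threads in the block see a value above the threshold.
// __syncthreads_count acts as a barrier and returns, to every thread,
// the number of threads whose predicate was nonzero.
__global__ void count_above(const float *data, int *block_counts, float threshold)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int pred = data[i] > threshold;
    int count = __syncthreads_count(pred);
    if (threadIdx.x == 0) {
        block_counts[blockIdx.x] = count;
    }
}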
+ */ + +#if !defined(__SM_20_INTRINSICS_HPP__) +#define __SM_20_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_INTRINSICS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_INTRINSICS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_20_INTRINSICS_DECL__ unsigned int ballot(bool pred) +{ + return __ballot((int)pred); +} + +__SM_20_INTRINSICS_DECL__ int syncthreads_count(bool pred) +{ + return __syncthreads_count((int)pred); +} + +__SM_20_INTRINSICS_DECL__ bool syncthreads_and(bool pred) +{ + return (bool)__syncthreads_and((int)pred); +} + +__SM_20_INTRINSICS_DECL__ bool syncthreads_or(bool pred) +{ + return (bool)__syncthreads_or((int)pred); +} + + +extern "C" { + __device__ unsigned __nv_isGlobal_impl(const void *); + __device__ unsigned __nv_isShared_impl(const void *); + __device__ unsigned __nv_isConstant_impl(const void *); + __device__ unsigned __nv_isLocal_impl(const void *); + __device__ unsigned __nv_isGridConstant_impl(const void *); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isGlobal(const void *ptr) +{ + return __nv_isGlobal_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isShared(const void *ptr) +{ + return __nv_isShared_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isConstant(const void *ptr) +{ + return __nv_isConstant_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isLocal(const void *ptr) +{ + return __nv_isLocal_impl(ptr); +} + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ unsigned int __isGridConstant(const void *ptr) +{ + return __nv_isGridConstant_impl(ptr); +} +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + +extern "C" { + __device__ size_t __nv_cvta_generic_to_global_impl(const void *); + __device__ size_t __nv_cvta_generic_to_shared_impl(const void *); + __device__ size_t __nv_cvta_generic_to_constant_impl(const void *); + __device__ size_t __nv_cvta_generic_to_local_impl(const void *); + __device__ void * __nv_cvta_global_to_generic_impl(size_t); + __device__ void * __nv_cvta_shared_to_generic_impl(size_t); + __device__ void * __nv_cvta_constant_to_generic_impl(size_t); + __device__ void * __nv_cvta_local_to_generic_impl(size_t); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_global(const void *p) +{ + return __nv_cvta_generic_to_global_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_shared(const void *p) +{ + return __nv_cvta_generic_to_shared_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_constant(const void *p) +{ + return __nv_cvta_generic_to_constant_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_local(const void *p) +{ + return __nv_cvta_generic_to_local_impl(p); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_global_to_generic(size_t rawbits) +{ + return __nv_cvta_global_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_shared_to_generic(size_t rawbits) +{ + return __nv_cvta_shared_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_constant_to_generic(size_t rawbits) +{ + 
return __nv_cvta_constant_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_local_to_generic(size_t rawbits) +{ + return __nv_cvta_local_to_generic_impl(rawbits); +} + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __CVTA_PTR_64 1 +#endif + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_grid_constant(const void *ptr) +{ +#if __CVTA_PTR_64 + unsigned long long ret; + asm("cvta.to.param.u64 %0, %1;" : "=l"(ret) : "l"(ptr)); +#else /* !__CVTA_PTR_64 */ + unsigned ret; + asm("cvta.to.param.u32 %0, %1;" : "=r"(ret) : "r"(ptr)); +#endif /* __CVTA_PTR_64 */ + return (size_t)ret; + +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_grid_constant_to_generic(size_t rawbits) +{ + void *ret; +#if __CVTA_PTR_64 + unsigned long long in = rawbits; + asm("cvta.param.u64 %0, %1;" : "=l"(ret) : "l"(in)); +#else /* !__CVTA_PTR_64 */ + unsigned in = rawbits; + asm("cvta.param.u32 %0, %1;" : "=r"(ret) : "r"(in)); +#endif /* __CVTA_PTR_64 */ + return ret; +} +#undef __CVTA_PTR_64 +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_20_INTRINSICS_DECL__ + +#endif /* !__SM_20_INTRINSICS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..c2efc2e248350f5800f56e44348ebbc9b04b480c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h @@ -0,0 +1,221 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. 
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_30_INTRINSICS_H__) +#define __SM_30_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_30_INTRINSICS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_30_INTRINSICS_DECL__ extern __device__ __cudart_builtin__ +#else /* !__CUDACC_RTC__ */ +#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +/******************************************************************************* +* * +* Below are declarations of SM-3.0 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." 
+#endif + +__SM_30_INTRINSICS_DECL__ unsigned __fns(unsigned mask, unsigned base, int offset) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __barrier_sync(unsigned id) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __barrier_sync_count(unsigned id, unsigned cnt) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __syncwarp(unsigned mask=0xFFFFFFFF) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __all_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __any_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __uni_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned __ballot_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned __activemask() __DEF_IF_HOST + +// Warp register exchange (shuffle) intrinsics. +// Notes: +// a) Warp size is hardcoded to 32 here, because the compiler does not know +// the "warpSize" constant at this time +// b) we cannot map the float __shfl to the int __shfl because it'll mess with +// the register number (especially if you're doing two shfls to move a double). +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) int __shfl(int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned int __shfl(unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) int __shfl_up(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned int __shfl_up(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) int __shfl_down(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned int __shfl_down(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) int __shfl_xor(int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned int __shfl_xor(unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) float __shfl(float var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) float __shfl_up(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) float __shfl_down(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) float __shfl_xor(float var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned 
mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width=warpSize) __DEF_IF_HOST + +// 64-bits SHFL +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long long __shfl(unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long long __shfl(long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long long __shfl_up(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long long __shfl_down(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long long __shfl_xor(long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) double __shfl(double var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) double __shfl_up(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) double __shfl_down(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) double __shfl_xor(double var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long 
__shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width=warpSize) __DEF_IF_HOST + +// long needs some help to choose between 32-bits and 64-bits +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long __shfl(long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long __shfl(unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long __shfl_up(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long __shfl_up(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long __shfl_down(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long __shfl_down(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long __shfl_xor(long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long __shfl_xor(unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned 
int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_30_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_30_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_30_INTRINSICS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a5bcac5ee68c0cf547e4de7c08badf37106639dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp @@ -0,0 +1,604 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_30_INTRINSICS_HPP__) +#define __SM_30_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_30_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +// In here are intrinsics which are built in to the compiler. These may be +// referenced by intrinsic implementations from this file. +extern "C" +{ +} + +/******************************************************************************* +* * +* Below are implementations of SM-3.0 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +__SM_30_INTRINSICS_DECL__ +unsigned __fns(unsigned mask, unsigned base, int offset) { + extern __device__ __device_builtin__ unsigned int __nvvm_fns(unsigned int mask, unsigned int base, int offset); + return __nvvm_fns(mask, base, offset); +} + +__SM_30_INTRINSICS_DECL__ +void __barrier_sync(unsigned id) { + extern __device__ __device_builtin__ void __nvvm_barrier_sync(unsigned id); + return __nvvm_barrier_sync(id); +} + +__SM_30_INTRINSICS_DECL__ +void __barrier_sync_count(unsigned id, unsigned cnt) { + extern __device__ __device_builtin__ void __nvvm_barrier_sync_cnt(unsigned id, unsigned cnt); + return __nvvm_barrier_sync_cnt(id, cnt); +} + +__SM_30_INTRINSICS_DECL__ +void __syncwarp(unsigned mask) { + extern __device__ __device_builtin__ void __nvvm_bar_warp_sync(unsigned mask); + return __nvvm_bar_warp_sync(mask); +} + +__SM_30_INTRINSICS_DECL__ +int __all_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_all_sync(unsigned int mask, int pred); + return __nvvm_vote_all_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +int __any_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_any_sync(unsigned int mask, int pred); + return __nvvm_vote_any_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +int __uni_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_uni_sync(unsigned int mask, int pred); + return __nvvm_vote_uni_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +unsigned __ballot_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ unsigned int __nvvm_vote_ballot_sync(unsigned int mask, int pred); + return __nvvm_vote_ballot_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +unsigned __activemask() { + unsigned ret; + asm volatile ("activemask.b32 %0;" : "=r"(ret)); + return ret; +} + +// These are removed starting with compute_70 and onwards +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 + +__SM_30_INTRINSICS_DECL__ int __shfl(int var, int srcLane, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile 
("shfl.idx.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(srcLane), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl(unsigned int var, int srcLane, int width) { + return (unsigned int) __shfl((int)var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_up(int var, unsigned int delta, int width) { + int ret; + int c = (warpSize-width) << 8; + asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up(unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_up((int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_down(int var, unsigned int delta, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down(unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_down((int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_xor(int var, int laneMask, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(laneMask), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor(unsigned int var, int laneMask, int width) { + return (unsigned int) __shfl_xor((int)var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ float __shfl(float var, int srcLane, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(srcLane), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_up(float var, unsigned int delta, int width) { + float ret; + int c; + c = (warpSize-width) << 8; + asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_down(float var, unsigned int delta, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_xor(float var, int laneMask, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(laneMask), "r"(c)); + return ret; +} + +// 64-bits SHFL + +__SM_30_INTRINSICS_DECL__ long long __shfl(long long var, int srcLane, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl(hi, srcLane, width); + lo = __shfl(lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl(unsigned long long var, int srcLane, int width) { + return (unsigned long long) __shfl((long long) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_up(long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_up(hi, delta, width); + lo = __shfl_up(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_up((long long) var, delta, 
width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_down(long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_down(hi, delta, width); + lo = __shfl_down(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_down((long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_xor(long long var, int laneMask, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_xor(hi, laneMask, width); + lo = __shfl_xor(lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width) { + return (unsigned long long) __shfl_xor((long long) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ double __shfl(double var, int srcLane, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl(hi, srcLane, width); + lo = __shfl(lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_up(double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_up(hi, delta, width); + lo = __shfl_up(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_down(double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_down(hi, delta, width); + lo = __shfl_down(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_xor(double var, int laneMask, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_xor(hi, laneMask, width); + lo = __shfl_xor(lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ long __shfl(long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl((long long) var, srcLane, width) : + __shfl((int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl(unsigned long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl((unsigned long long) var, srcLane, width) : + __shfl((unsigned int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_up(long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up((long long) var, delta, width) : + __shfl_up((int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up(unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up((unsigned long long) var, delta, width) : + __shfl_up((unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_down(long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? 
+ __shfl_down((long long) var, delta, width) : + __shfl_down((int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down(unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_down((unsigned long long) var, delta, width) : + __shfl_down((unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_xor(long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor((long long) var, laneMask, width) : + __shfl_xor((int) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor(unsigned long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor((unsigned long long) var, laneMask, width) : + __shfl_xor((unsigned int) var, laneMask, width); +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */ + +// Warp register exchange (shuffle) intrinsics. +// Notes: +// a) Warp size is hardcoded to 32 here, because the compiler does not know +// the "warpSize" constant at this time +// b) we cannot map the float __shfl to the int __shfl because it'll mess with +// the register number (especially if you're doing two shfls to move a double). +__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_idx_sync(mask, var, srcLane, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width) { + return (unsigned int) __shfl_sync(mask, (int)var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = (warpSize-width) << 8; + ret = __nvvm_shfl_up_sync(mask, var, delta, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_up_sync(mask, (int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_down_sync(mask, var, delta, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_down_sync(mask, (int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_bfly_sync(mask, var, laneMask, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width) { + return (unsigned int) __shfl_xor_sync(mask, (int)var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width) { + extern __device__ __device_builtin__ 
unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_idx_sync(mask, __float_as_int(var), srcLane, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = (warpSize-width) << 8; + ret = __nvvm_shfl_up_sync(mask, __float_as_int(var), delta, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_down_sync(mask, __float_as_int(var), delta, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_bfly_sync(mask, __float_as_int(var), laneMask, c); + return __int_as_float(ret); +} + +// 64-bits SHFL +__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_sync(mask, hi, srcLane, width); + lo = __shfl_sync(mask, lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width) { + return (unsigned long long) __shfl_sync(mask, (long long) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_up_sync(mask, hi, delta, width); + lo = __shfl_up_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_up_sync(mask, (long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_down_sync(mask, hi, delta, width); + lo = __shfl_down_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_down_sync(mask, (long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_xor_sync(mask, hi, laneMask, width); + lo = __shfl_xor_sync(mask, lo, laneMask, width); + asm volatile("mov.b64 %0, 
{%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width) { + return (unsigned long long) __shfl_xor_sync(mask, (long long) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_sync(mask, hi, srcLane, width); + lo = __shfl_sync(mask, lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_up_sync(mask, hi, delta, width); + lo = __shfl_up_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_down_sync(mask, hi, delta, width); + lo = __shfl_down_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_xor_sync(mask, hi, laneMask, width); + lo = __shfl_xor_sync(mask, lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +// long needs some help to choose between 32-bits and 64-bits + +__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_sync(mask, (long long) var, srcLane, width) : + __shfl_sync(mask, (int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_sync(mask, (unsigned long long) var, srcLane, width) : + __shfl_sync(mask, (unsigned int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up_sync(mask, (long long) var, delta, width) : + __shfl_up_sync(mask, (int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up_sync(mask, (unsigned long long) var, delta, width) : + __shfl_up_sync(mask, (unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_down_sync(mask, (long long) var, delta, width) : + __shfl_down_sync(mask, (int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? 
+ __shfl_down_sync(mask, (unsigned long long) var, delta, width) : + __shfl_down_sync(mask, (unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor_sync(mask, (long long) var, laneMask, width) : + __shfl_xor_sync(mask, (int) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor_sync(mask, (unsigned long long) var, laneMask, width) : + __shfl_xor_sync(mask, (unsigned int) var, laneMask, width); +} + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_30_INTRINSICS_DECL__ + +#endif /* !__SM_30_INTRINSICS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b8ff847157cc366361319b7932a8ffe0f1fd30f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp @@ -0,0 +1,134 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. 
Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_32_ATOMIC_FUNCTIONS_HPP__) +#define __SM_32_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val) +{ + return __illAtomicMin(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val) +{ + return __illAtomicMax(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val) +{ + return __llAtomicAnd(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val) +{ + return __llAtomicOr(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val) +{ + return __llAtomicXor(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr(address, val); +} + +__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor(address, val); +} + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_32_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_32_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..6042d763e2d4df602bf2f2aa84e2a0fb87dd8760 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h @@ -0,0 +1,510 @@ +/* + * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_32_INTRINSICS_H__) +#define __SM_32_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_32_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_32_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +#define __DEF_IF_HOST ; +#else /* defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) */ +#define __DEF_IF_HOST { } +#endif /* defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) */ + + +/******************************************************************************* +* * +* Below are declarations of SM-3.5 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ +/****************************************************************************** + * __ldg * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) __DEF_IF_HOST 
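/* Editor's illustrative sketch (not part of the NVIDIA header, excluded from
 * compilation): the __ldg() overloads declared here load through the read-only
 * data cache, which is only safe when no thread writes the pointed-to memory
 * for the lifetime of the kernel. The kernel name and parameters below are
 * hypothetical. */
#if 0
__global__ void scale_readonly(const float *__restrict__ in, float *out,
                               float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        /* in[] is never written by this kernel, so the read-only cache
         * path used by __ldg() is valid here. */
        out[i] = a * __ldg(&in[i]);
    }
}
#endif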
+__SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) __DEF_IF_HOST +/****************************************************************************** + * __ldcg * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) __DEF_IF_HOST +/****************************************************************************** + * __ldca * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) 
__DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) __DEF_IF_HOST +/****************************************************************************** + * __ldcs * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ 
ulonglong2 __ldcs(const ulonglong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) __DEF_IF_HOST +/****************************************************************************** + * __ldlu * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) __DEF_IF_HOST +/****************************************************************************** + * __ldcv * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) __DEF_IF_HOST 
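The __ldcg, __ldca, __ldcs, __ldlu and __ldcv families expose the PTX load cache operators (.cg, .ca, .cs, .lu, .cv) to CUDA C++. As a hedged sketch, a grid-stride pass over data that is read exactly once might use the streaming operator __ldcs so the scan does not evict the resident working set; the kernel and buffer names here are illustrative, not part of this header:

__global__ void sum_streaming(const float *in, float *per_thread, int n)
{
    int tid    = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    float acc = 0.0f;
    // Each element is read exactly once, so the streaming operator
    // (__ldcs) marks the cache line as evict-first instead of letting a
    // one-pass scan displace data that other kernels still reuse.
    for (int i = tid; i < n; i += stride)
        acc += __ldcs(&in[i]);
    per_thread[tid] = acc;   // per_thread must hold one slot per launched thread
}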
+__SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) __DEF_IF_HOST +/****************************************************************************** + * __stwb * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST 
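Among the load operators, __ldcv is the "don't cache, fetch again" variant (PTX .cv), which is one way to poll a location that the host or a peer device updates. A hypothetical sketch, ignoring the memory-fence and forward-progress considerations a real implementation would need; spin_until_set and flag are illustrative names:

__device__ int spin_until_set(const int *flag)
{
    int v;
    do {
        // The .cv operator treats any cached copy as stale and re-fetches
        // on every iteration, so a store made elsewhere eventually
        // becomes visible to this loop.
        v = __ldcv(flag);
    } while (v == 0);
    return v;
}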
+__SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) __DEF_IF_HOST +/****************************************************************************** + * __stcg * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) __DEF_IF_HOST 
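The __stwb and __stcg overloads, together with the __stcs and __stwt overloads declared just below, are the store-side counterparts, corresponding to the PTX store cache operators (.wb, .cg, .cs, .wt). A minimal sketch of a one-pass copy that pairs the streaming load with the streaming store; the kernel name is illustrative, not part of this header:

__global__ void copy_streaming(const float4 *src, float4 *dst, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float4 v = __ldcs(&src[i]);  // streaming (evict-first) load
        __stcs(&dst[i], v);          // streaming (evict-first) store
    }
}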
+__SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) __DEF_IF_HOST +/****************************************************************************** + * __stcs * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) __DEF_IF_HOST +/****************************************************************************** + * __stwt * + ******************************************************************************/ +__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 
value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST + +__SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) __DEF_IF_HOST +__SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) __DEF_IF_HOST + + +// SHF is the "funnel shift" operation - an accelerated left/right shift with carry +// operating on 64-bit quantities, which are concatenations of two 32-bit registers. + +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Concatenate \p hi : \p lo, shift left by \p shift & 31 bits, return the most significant 32 bits. + * + * Shift the 64-bit value formed by concatenating argument \p lo and \p hi left by the amount specified by the argument \p shift. + * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value. + * The source is shifted left by the wrapped value of \p shift (\p shift & 31). + * The most significant 32-bits of the result are returned. + * + * \return Returns the most significant 32 bits of the shifted 64-bit value. + */ +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Concatenate \p hi : \p lo, shift left by min(\p shift, 32) bits, return the most significant 32 bits. + * + * Shift the 64-bit value formed by concatenating argument \p lo and \p hi left by the amount specified by the argument \p shift. + * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value. + * The source is shifted left by the clamped value of \p shift (min(\p shift, 32)). + * The most significant 32-bits of the result are returned. + * + * \return Returns the most significant 32 bits of the shifted 64-bit value. 
+ */ +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST + +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Concatenate \p hi : \p lo, shift right by \p shift & 31 bits, return the least significant 32 bits. + * + * Shift the 64-bit value formed by concatenating argument \p lo and \p hi right by the amount specified by the argument \p shift. + * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value. + * The source is shifted right by the wrapped value of \p shift (\p shift & 31). + * The least significant 32-bits of the result are returned. + * + * \return Returns the least significant 32 bits of the shifted 64-bit value. + */ +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Concatenate \p hi : \p lo, shift right by min(\p shift, 32) bits, return the least significant 32 bits. + * + * Shift the 64-bit value formed by concatenating argument \p lo and \p hi right by the amount specified by the argument \p shift. + * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value. + * The source is shifted right by the clamped value of \p shift (min(\p shift, 32)). + * The least significant 32-bits of the result are returned. + * + * \return Returns the least significant 32 bits of the shifted 64-bit value. + */ +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST + + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_32_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)) +#include "sm_32_intrinsics.hpp" +#endif /* !defined(__CUDACC_RTC__) && (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)) */ + +#endif /* !__SM_32_INTRINSICS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..c8961079aeac4c9e73a7c2825cf9ea10b171af09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h @@ -0,0 +1,58 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_35_ATOMIC_FUNCTIONS_H__) +#define __SM_35_ATOMIC_FUNCTIONS_H__ + +/******************************************************************************* +* All sm_35 atomics are supported by sm_32 so simply include its header file * +*******************************************************************************/ +#include "sm_32_atomic_functions.h" + +#endif /* !__SM_35_ATOMIC_FUNCTIONS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_intrinsics.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..da1e823a24171ed1ca9414955c6c68159a4411f5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_intrinsics.h @@ -0,0 +1,116 @@ +/* + + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + + * + + * NOTICE TO LICENSEE: + + * + + * This source code and/or documentation ("Licensed Deliverables") are + + * subject to NVIDIA intellectual property rights under U.S. and + + * international Copyright laws. + + * + + * These Licensed Deliverables contained herein is PROPRIETARY and + + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + + * conditions of a form of NVIDIA software license agreement by and + + * between NVIDIA and Licensee ("License Agreement") or electronically + + * accepted by Licensee. Notwithstanding any terms or conditions to + + * the contrary in the License Agreement, reproduction or disclosure + + * of the Licensed Deliverables to any third party without the express + + * written consent of NVIDIA is prohibited. + + * + + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + + * OF THESE LICENSED DELIVERABLES. + + * + + * U.S. Government End Users. These Licensed Deliverables are a + + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + + * 1995), consisting of "commercial computer software" and "commercial + + * computer software documentation" as such terms are used in 48 + + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + + * U.S. Government End Users acquire the Licensed Deliverables with + + * only those rights set forth herein. + + * + + * Any use of the Licensed Deliverables in individual and commercial + + * software must include, in the user documentation and internal + + * comments to the code, the above Disclaimer and U.S. Government End + + * Users Notice. + + */ + + + +#if !defined(__SM_35_INTRINSICS_H__) + +#define __SM_35_INTRINSICS_H__ + + + +/********************************************************************************** + +* All sm_35 intrinsics are supported by sm_32 so simply include its header file * + +**********************************************************************************/ + +#include "sm_32_intrinsics.h" + + + +#endif /* !__SM_35_INTRINSICS_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..f2f6da7032f66f3988868ad769d7533d15def5a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h @@ -0,0 +1,543 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_60_ATOMIC_FUNCTIONS_H__) +#define __SM_60_ATOMIC_FUNCTIONS_H__ + + +#if defined(__CUDACC_RTC__) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__ +#else /* __CUDACC_RTC__ */ +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. 
*/ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + + +#ifdef __CUDA_ARCH__ +extern "C" +{ +extern __device__ __device_builtin__ double __dAtomicAdd(double *address, double val); + +extern __device__ __device_builtin__ +int __iAtomicAdd_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicAdd_system(int *address, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAdd_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAdd_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAdd_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAdd_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +float __fAtomicAdd_block(float *address, float val); + +extern __device__ __device_builtin__ +float __fAtomicAdd_system(float *address, float val); + +extern __device__ __device_builtin__ +double __dAtomicAdd_block(double *address, double val); + +extern __device__ __device_builtin__ +double __dAtomicAdd_system(double *address, double val); + +extern __device__ __device_builtin__ +int __iAtomicExch_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicExch_system(int *address, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicExch_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicExch_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicExch_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicExch_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +float __fAtomicExch_block(float *address, float val); + +extern __device__ __device_builtin__ +float __fAtomicExch_system(float *address, float val); + +extern __device__ __device_builtin__ +int __iAtomicMin_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicMin_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __illAtomicMin_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __illAtomicMin_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMin_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMin_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMin_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMin_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicMax_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicMax_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __illAtomicMax_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __illAtomicMax_system(long long *address, long long val); + +extern __device__ 
__device_builtin__ +unsigned int __uAtomicMax_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMax_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMax_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMax_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicInc_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicInc_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicDec_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicDec_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +int __iAtomicCAS_block(int *address, int compare, int val); + +extern __device__ __device_builtin__ +int __iAtomicCAS_system(int *address, int compare, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val); + +extern __device__ __device_builtin__ +int __iAtomicAnd_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicAnd_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicAnd_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicAnd_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAnd_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAnd_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAnd_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAnd_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicOr_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicOr_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicOr_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicOr_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicOr_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicOr_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicOr_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicOr_system(unsigned long long *address, unsigned long 
long val); + +extern __device__ __device_builtin__ +int __iAtomicXor_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicXor_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicXor_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicXor_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicXor_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicXor_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicXor_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicXor_system(unsigned long long *address, unsigned long long val); +} +#endif /* __CUDA_ARCH__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_block(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_system(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_block(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_system(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_block(float *address, float val) 
__DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_system(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_block(int *address, int compare, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_system(int *address, int compare, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ 
+long long atomicAnd_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_60_ATOMIC_FUNCTIONS_DECL__ +#undef __DEF_IF_HOST + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_60_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_60_ATOMIC_FUNCTIONS_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..858b373238a4d87f8d5fc669bf4145f4f2a6e877 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp @@ -0,0 +1,527 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_60_ATOMIC_FUNCTIONS_HPP__) +#define __SM_60_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) +{ + return __dAtomicAdd(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_block(int *address, int val) +{ + return __iAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_system(int *address, int val) +{ + return __iAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_block(float *address, float val) +{ + return __fAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_system(float *address, float val) +{ + return __fAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_block(double *address, double val) +{ + return __dAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_system(double *address, double val) +{ + return __dAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_block(int *address, int val) +{ + return __iAtomicAdd_block(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_system(int *address, int val) +{ + return __iAtomicAdd_system(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_block(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_system(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_block(int *address, int val) +{ + return __iAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_system(int *address, int val) +{ + return __iAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_block(unsigned int *address, unsigned int val) +{ + return __uAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int 
atomicExch_system(unsigned int *address, unsigned int val) +{ + return __uAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_block(float *address, float val) +{ + return __fAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_system(float *address, float val) +{ + return __fAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_block(int *address, int val) +{ + return __iAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_system(int *address, int val) +{ + return __iAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_block(long long *address, long long val) +{ + return __illAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_system(long long *address, long long val) +{ + return __illAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_block(unsigned int *address, unsigned int val) +{ + return __uAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_system(unsigned int *address, unsigned int val) +{ + return __uAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_block(int *address, int val) +{ + return __iAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_system(int *address, int val) +{ + return __iAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_block(long long *address, long long val) +{ + return __illAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_system(long long *address, long long val) +{ + return __illAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_block(unsigned int *address, unsigned int val) +{ + return __uAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_system(unsigned int *address, unsigned int val) +{ + return __uAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_block(unsigned int *address, unsigned int val) +{ + return __uAtomicInc_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_system(unsigned int *address, unsigned int val) +{ + return __uAtomicInc_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int 
atomicDec_block(unsigned int *address, unsigned int val) +{ + return __uAtomicDec_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_system(unsigned int *address, unsigned int val) +{ + return __uAtomicDec_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_block(int *address, int compare, int val) +{ + return __iAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_system(int *address, int compare, int val) +{ + return __iAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val) +{ + return __uAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val) +{ + return __uAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) +{ + return __ullAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) +{ + return __ullAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_block(int *address, int val) +{ + return __iAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_system(int *address, int val) +{ + return __iAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_block(long long *address, long long val) +{ + return __llAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_system(long long *address, long long val) +{ + return __llAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_block(int *address, int val) +{ + return __iAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_system(int *address, int val) +{ + return __iAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_block(long long *address, long long val) +{ + return __llAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_system(long long *address, long long val) +{ + return __llAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_block(unsigned int *address, unsigned int val) +{ + return __uAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_system(unsigned int *address, unsigned int val) +{ + return __uAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long 
atomicOr_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_block(int *address, int val) +{ + return __iAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_system(int *address, int val) +{ + return __iAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_block(long long *address, long long val) +{ + return __llAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_system(long long *address, long long val) +{ + return __llAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_block(unsigned int *address, unsigned int val) +{ + return __uAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_system(unsigned int *address, unsigned int val) +{ + return __uAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor_system(address, val); +} + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_60_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_60_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..ff78f1a7e559190773d1be284727b335a2317e1e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.h @@ -0,0 +1,123 @@ +/* + * Copyright 2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
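Illustrative usage sketch (editorial note, not part of the vendored headers): the _block and _system suffixed atomics defined above narrow or widen the memory scope of the operation relative to the default device-wide atomics. A minimal kernel, assuming compilation with -arch=sm_60 or newer; the kernel name and variables are hypothetical:

#include <cuda_runtime.h>

__global__ void accumulate(double *system_sum, double *block_sum)
{
    double contribution = 1.0;
    // System scope: atomic with respect to all device threads and the host.
    atomicAdd_system(system_sum, contribution);
    // Block scope: atomic only with respect to threads in the same thread block.
    atomicAdd_block(block_sum, contribution);
}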
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_61_INTRINSICS_H__) +#define __SM_61_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_61_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_61_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 610 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +/******************************************************************************* +* * +* Below are declarations of SM-6.1 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + + +/****************************************************************************** + * __dp2a * + ******************************************************************************/ +// Generic [_lo] +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(int srcA, int srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(unsigned int srcA, unsigned int srcB, unsigned int c) __DEF_IF_HOST +// Vector-style [_lo] +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(short2 srcA, char4 srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(ushort2 srcA, uchar4 srcB, unsigned int c) __DEF_IF_HOST +// Generic [_hi] +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(int srcA, int srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(unsigned int srcA, unsigned int srcB, unsigned int c) __DEF_IF_HOST +// Vector-style [_hi] +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(short2 srcA, char4 srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(ushort2 srcA, uchar4 srcB, unsigned int c) __DEF_IF_HOST + + +/****************************************************************************** + * __dp4a * + ******************************************************************************/ +// 
Generic +__SM_61_INTRINSICS_DECL__ int __dp4a(int srcA, int srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(unsigned int srcA, unsigned int srcB, unsigned int c) __DEF_IF_HOST +// Vector-style +__SM_61_INTRINSICS_DECL__ int __dp4a(char4 srcA, char4 srcB, int c) __DEF_IF_HOST +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(uchar4 srcA, uchar4 srcB, unsigned int c) __DEF_IF_HOST + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 610 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_61_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_61_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_61_INTRINSICS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5a561384b08a65445eed86bfc96a0694e5b9190c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp @@ -0,0 +1,161 @@ +/* + * Copyright 2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_61_INTRINSICS_HPP__) +#define __SM_61_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_61_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_61_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 610 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* Below are implementations of SM-6.1 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +// 4a +__SM_61_INTRINSICS_DECL__ int __dp4a(int srcA, int srcB, int c) { + int ret; + asm volatile ("dp4a.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(unsigned int srcA, unsigned int srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp4a.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ int __dp4a(char4 srcA, char4 srcB, int c) { + int ret; + asm volatile ("dp4a.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(uchar4 srcA, uchar4 srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp4a.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c)); + return ret; +} + +// 2a.lo +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(int srcA, int srcB, int c) { + int ret; + asm volatile ("dp2a.lo.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(unsigned int srcA, unsigned int srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.lo.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ int __dp2a_lo(short2 srcA, char4 srcB, int c) { + int ret; + asm volatile ("dp2a.lo.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(ushort2 srcA, uchar4 srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.lo.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c)); + return ret; +} + +// 2a.hi +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(int srcA, int srcB, int c) { + int ret; + asm volatile ("dp2a.hi.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(unsigned int srcA, unsigned int srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.hi.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ int __dp2a_hi(short2 srcA, char4 srcB, int c) { + int ret; + asm volatile ("dp2a.hi.s32.s32 
%0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c)); + return ret; +} + +__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(ushort2 srcA, uchar4 srcB, unsigned int c) { + unsigned int ret; + asm volatile ("dp2a.hi.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c)); + return ret; +} + + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 610 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_61_INTRINSICS_DECL__ + +#endif /* !__SM_61_INTRINSICS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..a93faf052f98d81f8cb65bd9591d08ec90c994d9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h @@ -0,0 +1,243 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
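Illustrative usage sketch (editorial note, not part of the vendored headers): __dp4a accumulates a four-way product of packed 8-bit values into a 32-bit integer, the building block of int8 dot products. A minimal kernel assuming -arch=sm_61 or newer; the kernel name is hypothetical:

#include <cuda_runtime.h>

__global__ void dot_int8(const char4 *a, const char4 *b, int n, int *out)
{
    int acc = 0;
    // Each __dp4a adds a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z + a[i].w*b[i].w.
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        acc = __dp4a(a[i], b[i], acc);
    atomicAdd(out, acc);   // combine per-thread partial sums
}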
+ */ + + +#ifndef __SURFACE_INDIRECT_FUNCTIONS_H__ +#define __SURFACE_INDIRECT_FUNCTIONS_H__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#include "cuda_runtime_api.h" + +template struct __nv_isurf_trait { }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; + +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; + +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; +template<> struct __nv_isurf_trait { typedef void type; }; + + +template +static __device__ typename __nv_isurf_trait::type surf1Dread(T *ptr, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1Dread", ptr, obj, x, mode); +} + +template +static __device__ T surf1Dread(cudaSurfaceObject_t surfObject, int x, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf1Dread(&ret, surfObject, x, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf2Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2Dread", ptr, obj, x, y, mode); +} + +template +static __device__ T surf2Dread(cudaSurfaceObject_t surfObject, int x, int y, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf2Dread(&ret, surfObject, x, y, boundaryMode); + return ret; +} + + +template +static __device__ typename __nv_isurf_trait::type surf3Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf3Dread", ptr, obj, x, y, z, mode); +} + +template +static __device__ T 
surf3Dread(cudaSurfaceObject_t surfObject, int x, int y, int z, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf3Dread(&ret, surfObject, x, y, z, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf1DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1DLayeredread", ptr, obj, x, layer, mode); +} + +template +static __device__ T surf1DLayeredread(cudaSurfaceObject_t surfObject, int x, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf1DLayeredread(&ret, surfObject, x, layer, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf2DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2DLayeredread", ptr, obj, x, y, layer, mode); +} + +template +static __device__ T surf2DLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surf2DLayeredread(&ret, surfObject, x, y, layer, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapread", ptr, obj, x, y, face, mode); +} + +template +static __device__ T surfCubemapread(cudaSurfaceObject_t surfObject, int x, int y, int face, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surfCubemapread(&ret, surfObject, x, y, face, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapLayeredread", ptr, obj, x, y, layerface, mode); +} + +template +static __device__ T surfCubemapLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layerface, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap) +{ + T ret; + surfCubemapLayeredread(&ret, surfObject, x, y, layerface, boundaryMode); + return ret; +} + +template +static __device__ typename __nv_isurf_trait::type surf1Dwrite(T val, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1Dwrite_v2", &val, obj, x, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf2Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2Dwrite_v2", &val, obj, x, y, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf3Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf3Dwrite_v2", &val, obj, x, y, z, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf1DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf1DLayeredwrite_v2", &val, obj, x, layer, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surf2DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode 
mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurf2DLayeredwrite_v2", &val, obj, x, y, layer, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapwrite(T val, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapwrite_v2", &val, obj, x, y, face, mode); +} + +template +static __device__ typename __nv_isurf_trait::type surfCubemapLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap) +{ + __nv_tex_surf_handler("__isurfCubemapLayeredwrite_v2", &val, obj, x, y, layerface, mode); +} + +#endif // __cplusplus && __CUDACC__ + +#endif // __SURFACE_INDIRECT_FUNCTIONS_H__ + + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_types.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_types.h new file mode 100644 index 0000000000000000000000000000000000000000..739dfe490ddd33d1a39c7b264a32d321eec852c9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_types.h @@ -0,0 +1,108 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
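Illustrative usage sketch (editorial note, not part of the vendored headers): the surf*read/surf*write templates above address a cudaSurfaceObject_t, with the x coordinate given in bytes rather than elements. A minimal 2D kernel; the kernel name is hypothetical, and clamp addressing is chosen instead of the default trap mode:

#include <cuda_runtime.h>

__global__ void invert(cudaSurfaceObject_t surf, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        // x is scaled by sizeof(float) because surface x-coordinates are byte offsets.
        float v = surf2Dread<float>(surf, x * (int)sizeof(float), y, cudaBoundaryModeClamp);
        surf2Dwrite(1.0f - v, surf, x * (int)sizeof(float), y);
    }
}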
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SURFACE_TYPES_H__) +#define __SURFACE_TYPES_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "driver_types.h" + +/** + * \addtogroup CUDART_TYPES + * + * @{ + */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#define cudaSurfaceType1D 0x01 +#define cudaSurfaceType2D 0x02 +#define cudaSurfaceType3D 0x03 +#define cudaSurfaceTypeCubemap 0x0C +#define cudaSurfaceType1DLayered 0xF1 +#define cudaSurfaceType2DLayered 0xF2 +#define cudaSurfaceTypeCubemapLayered 0xFC + +/** + * CUDA Surface boundary modes + */ +enum __device_builtin__ cudaSurfaceBoundaryMode +{ + cudaBoundaryModeZero = 0, /**< Zero boundary mode */ + cudaBoundaryModeClamp = 1, /**< Clamp boundary mode */ + cudaBoundaryModeTrap = 2 /**< Trap boundary mode */ +}; + +/** + * CUDA Surface format modes + */ +enum __device_builtin__ cudaSurfaceFormatMode +{ + cudaFormatModeForced = 0, /**< Forced format mode */ + cudaFormatModeAuto = 1 /**< Auto format mode */ +}; + +/** + * An opaque value that represents a CUDA Surface object + */ +typedef __device_builtin__ unsigned long long cudaSurfaceObject_t; + +/** @} */ +/** @} */ /* END CUDART_TYPES */ + +#endif /* !__SURFACE_TYPES_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_fetch_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_fetch_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..704e8518da6b3cf7b77e7b9d34638bc06dd3937f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_fetch_functions.h @@ -0,0 +1,223 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
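Illustrative host-side sketch (editorial note, not part of the vendored headers): a cudaSurfaceObject_t as defined above is created from a CUDA array allocated with the cudaArraySurfaceLoadStore flag and can then be passed to surface read/write kernels like the one sketched earlier. The helper name is hypothetical and error checking is omitted:

#include <cuda_runtime.h>

cudaSurfaceObject_t make_surface(size_t width, size_t height, cudaArray_t *out_array)
{
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaMallocArray(out_array, &desc, width, height, cudaArraySurfaceLoadStore);

    cudaResourceDesc res = {};               // zero-initialize all fields
    res.resType = cudaResourceTypeArray;
    res.res.array.array = *out_array;

    cudaSurfaceObject_t surf = 0;
    cudaCreateSurfaceObject(&surf, &res);
    return surf;
}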
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__TEXTURE_FETCH_FUNCTIONS_H__) +#define __TEXTURE_FETCH_FUNCTIONS_H__ + + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" +#include "cuda_texture_types.h" + +#if defined(_WIN32) +# define __DEPRECATED__ __declspec(deprecated) +#else +# define __DEPRECATED__ __attribute__((deprecated)) +#endif + + +template +struct __nv_tex_rmet_ret { }; + +template<> struct __nv_tex_rmet_ret { typedef char type; }; +template<> struct __nv_tex_rmet_ret { typedef signed char type; }; +template<> struct __nv_tex_rmet_ret { typedef unsigned char type; }; +template<> struct __nv_tex_rmet_ret { typedef char1 type; }; +template<> struct __nv_tex_rmet_ret { typedef uchar1 type; }; +template<> struct __nv_tex_rmet_ret { typedef char2 type; }; +template<> struct __nv_tex_rmet_ret { typedef uchar2 type; }; +template<> struct __nv_tex_rmet_ret { typedef char4 type; }; +template<> struct __nv_tex_rmet_ret { typedef uchar4 type; }; + +template<> struct __nv_tex_rmet_ret { typedef short type; }; +template<> struct __nv_tex_rmet_ret { typedef unsigned short type; }; +template<> struct __nv_tex_rmet_ret { typedef short1 type; }; +template<> struct __nv_tex_rmet_ret { typedef ushort1 type; }; +template<> struct __nv_tex_rmet_ret { typedef short2 type; }; +template<> struct __nv_tex_rmet_ret { typedef ushort2 type; }; +template<> struct __nv_tex_rmet_ret { typedef short4 type; }; +template<> struct __nv_tex_rmet_ret { typedef ushort4 type; }; + +template<> struct __nv_tex_rmet_ret { typedef int type; }; +template<> struct __nv_tex_rmet_ret { typedef unsigned int type; }; +template<> struct __nv_tex_rmet_ret { typedef int1 type; }; +template<> struct __nv_tex_rmet_ret { typedef uint1 type; }; +template<> struct __nv_tex_rmet_ret { typedef int2 type; }; +template<> struct __nv_tex_rmet_ret { typedef uint2 type; }; +template<> struct __nv_tex_rmet_ret { typedef int4 type; }; +template<> struct __nv_tex_rmet_ret { typedef uint4 type; }; + +#if !defined(__LP64__) +template<> struct __nv_tex_rmet_ret { typedef long type; }; +template<> struct __nv_tex_rmet_ret { 
typedef unsigned long type; }; +template<> struct __nv_tex_rmet_ret { typedef long1 type; }; +template<> struct __nv_tex_rmet_ret { typedef ulong1 type; }; +template<> struct __nv_tex_rmet_ret { typedef long2 type; }; +template<> struct __nv_tex_rmet_ret { typedef ulong2 type; }; +template<> struct __nv_tex_rmet_ret { typedef long4 type; }; +template<> struct __nv_tex_rmet_ret { typedef ulong4 type; }; +#endif /* !__LP64__ */ +template<> struct __nv_tex_rmet_ret { typedef float type; }; +template<> struct __nv_tex_rmet_ret { typedef float1 type; }; +template<> struct __nv_tex_rmet_ret { typedef float2 type; }; +template<> struct __nv_tex_rmet_ret { typedef float4 type; }; + + +template struct __nv_tex_rmet_cast { typedef T* type; }; +#if !defined(__LP64__) +template<> struct __nv_tex_rmet_cast { typedef int *type; }; +template<> struct __nv_tex_rmet_cast { typedef unsigned int *type; }; +template<> struct __nv_tex_rmet_cast { typedef int1 *type; }; +template<> struct __nv_tex_rmet_cast { typedef uint1 *type; }; +template<> struct __nv_tex_rmet_cast { typedef int2 *type; }; +template<> struct __nv_tex_rmet_cast { typedef uint2 *type; }; +template<> struct __nv_tex_rmet_cast { typedef int4 *type; }; +template<> struct __nv_tex_rmet_cast { typedef uint4 *type; }; +#endif /* !__LP64__ */ + +template +struct __nv_tex_rmnf_ret { }; + +template <> struct __nv_tex_rmnf_ret { typedef float type; }; +template <> struct __nv_tex_rmnf_ret { typedef float type; }; +template <> struct __nv_tex_rmnf_ret { typedef float type; }; +template <> struct __nv_tex_rmnf_ret { typedef float type; }; +template <> struct __nv_tex_rmnf_ret { typedef float type; }; +template <> struct __nv_tex_rmnf_ret { typedef float1 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float1 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float1 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float1 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float2 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float2 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float2 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float2 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float4 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float4 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float4 type; }; +template <> struct __nv_tex_rmnf_ret { typedef float4 type; }; + + +template +struct __nv_tex2dgather_ret { }; +template <> struct __nv_tex2dgather_ret { typedef char4 type; }; +template <> struct __nv_tex2dgather_ret { typedef char4 type; }; +template <> struct __nv_tex2dgather_ret { typedef char4 type; }; +template <> struct __nv_tex2dgather_ret { typedef char4 type; }; +template <> struct __nv_tex2dgather_ret { typedef char4 type; }; +template <> struct __nv_tex2dgather_ret { typedef char4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uchar4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uchar4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uchar4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uchar4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uchar4 type; }; + +template <> struct __nv_tex2dgather_ret { typedef short4 type; }; +template <> struct __nv_tex2dgather_ret { typedef short4 type; }; +template <> struct __nv_tex2dgather_ret { typedef short4 type; }; +template <> struct __nv_tex2dgather_ret { typedef short4 type; }; +template <> struct __nv_tex2dgather_ret { typedef short4 type; }; 
+template <> struct __nv_tex2dgather_ret { typedef ushort4 type; }; +template <> struct __nv_tex2dgather_ret { typedef ushort4 type; }; +template <> struct __nv_tex2dgather_ret { typedef ushort4 type; }; +template <> struct __nv_tex2dgather_ret { typedef ushort4 type; }; +template <> struct __nv_tex2dgather_ret { typedef ushort4 type; }; + +template <> struct __nv_tex2dgather_ret { typedef int4 type; }; +template <> struct __nv_tex2dgather_ret { typedef int4 type; }; +template <> struct __nv_tex2dgather_ret { typedef int4 type; }; +template <> struct __nv_tex2dgather_ret { typedef int4 type; }; +template <> struct __nv_tex2dgather_ret { typedef int4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uint4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uint4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uint4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uint4 type; }; +template <> struct __nv_tex2dgather_ret { typedef uint4 type; }; + +template <> struct __nv_tex2dgather_ret { typedef float4 type; }; +template <> struct __nv_tex2dgather_ret { typedef float4 type; }; +template <> struct __nv_tex2dgather_ret { typedef float4 type; }; +template <> struct __nv_tex2dgather_ret { typedef float4 type; }; +template <> struct __nv_tex2dgather_ret { typedef float4 type; }; + + +template struct __nv_tex2dgather_rmnf_ret { }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; +template<> struct __nv_tex2dgather_rmnf_ret { typedef float4 type; }; + +#undef __DEPRECATED__ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__TEXTURE_FETCH_FUNCTIONS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..bee6cd32c36d94bde65aad1c867352493d07a0dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h @@ -0,0 +1,175 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__VECTOR_FUNCTIONS_H__) +#define __VECTOR_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDACC_RTC__) +#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x); + +__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x); + +__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y); + +__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y); + +__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z); + +__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z); + +__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w); + +__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w); + +__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x); + +__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x); + +__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y); + +__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y); + +__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x,short y, short z); + +__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z); + +__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w); + +__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w); + +__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x); + +__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x); + +__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y); + +__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y); + +__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z); + +__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z); + +__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w); + +__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w); + +__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x); + +__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x); + +__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y); + +__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y); + +__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z); + +__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z); + +__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w); + +__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w); + +__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x); + +__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y); + +__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float 
x, float y, float z); + +__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w); + +__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x); + +__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x); + +__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y); + +__VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y); + +__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z); + +__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z); + +__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w); + +__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w); + +__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x); + +__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y); + +__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z); + +__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w); + +#undef __VECTOR_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) +#include "vector_functions.hpp" +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__VECTOR_FUNCTIONS_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ab69cf38045a7c44dae67e7149d49ac4c6148747 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp @@ -0,0 +1,316 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
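Illustrative usage sketch (editorial note, not part of the vendored headers): the make_* constructors declared above are plain value initializers usable from both host and device code. The helper name is hypothetical:

#include <cuda_runtime.h>

__host__ __device__ inline float4 axpy(float a, float4 x, float4 y)
{
    // Component-wise a*x + y, assembled with the make_float4 constructor.
    return make_float4(a * x.x + y.x, a * x.y + y.y, a * x.z + y.z, a * x.w + y.w);
}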
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__VECTOR_FUNCTIONS_HPP__) +#define __VECTOR_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDACC_RTC__) +#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x) +{ + char1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x) +{ + uchar1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y) +{ + char2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y) +{ + uchar2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z) +{ + char3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z) +{ + uchar3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w) +{ + char4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w) +{ + uchar4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x) +{ + short1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x) +{ + ushort1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y) +{ + short2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y) +{ + ushort2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short 
x,short y, short z) +{ + short3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z) +{ + ushort3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w) +{ + short4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w) +{ + ushort4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x) +{ + int1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x) +{ + uint1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y) +{ + int2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y) +{ + uint2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z) +{ + int3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z) +{ + uint3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w) +{ + int4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w) +{ + uint4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x) +{ + long1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x) +{ + ulong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y) +{ + long2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y) +{ + ulong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z) +{ + long3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z) +{ + ulong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w) +{ + long4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w) +{ + ulong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x) +{ + float1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y) +{ + float2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z) +{ + float3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w) +{ + float4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x) +{ + longlong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x) +{ + ulonglong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y) +{ + longlong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong2 
make_ulonglong2(unsigned long long int x, unsigned long long int y) +{ + ulonglong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z) +{ + longlong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z) +{ + ulonglong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w) +{ + longlong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w) +{ + ulonglong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x) +{ + double1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y) +{ + double2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z) +{ + double3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w) +{ + double4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +#undef __VECTOR_FUNCTIONS_DECL__ + +#endif /* !__VECTOR_FUNCTIONS_HPP__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_types.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4cfabcff8a25adaf3f589d38d531bc63cae2fcf6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_types.h @@ -0,0 +1,443 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__VECTOR_TYPES_H__) +#define __VECTOR_TYPES_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__ +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#ifndef __DOXYGEN_ONLY__ +#include "crt/host_defines.h" +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC__) && !defined(__CUDACC_RTC__) && \ + defined(_WIN32) && !defined(_WIN64) + +#pragma warning(push) +#pragma warning(disable: 4201 4408) + +#define __cuda_builtin_vector_align8(tag, members) \ +struct __device_builtin__ tag \ +{ \ + union \ + { \ + struct { members }; \ + struct { long long int :1,:0; }; \ + }; \ +} + +#else /* !__CUDACC__ && !__CUDACC_RTC__ && _WIN32 && !_WIN64 */ + +#define __cuda_builtin_vector_align8(tag, members) \ +struct __device_builtin__ __align__(8) tag \ +{ \ + members \ +} + +#endif /* !__CUDACC__ && !__CUDACC_RTC__ && _WIN32 && !_WIN64 */ + +struct __device_builtin__ char1 +{ + signed char x; +}; + +struct __device_builtin__ uchar1 +{ + unsigned char x; +}; + + +struct __device_builtin__ __align__(2) char2 +{ + signed char x, y; +}; + +struct __device_builtin__ __align__(2) uchar2 +{ + unsigned char x, y; +}; + +struct __device_builtin__ char3 +{ + signed char x, y, z; +}; + +struct __device_builtin__ uchar3 +{ + unsigned char x, y, z; +}; + +struct __device_builtin__ __align__(4) char4 +{ + signed char x, y, z, w; +}; + +struct __device_builtin__ __align__(4) uchar4 +{ + unsigned char x, y, z, w; +}; + +struct __device_builtin__ short1 +{ + short x; +}; + +struct __device_builtin__ ushort1 +{ + unsigned short x; +}; + +struct __device_builtin__ __align__(4) short2 +{ + short x, y; +}; + +struct __device_builtin__ __align__(4) ushort2 +{ + unsigned short x, y; +}; + +struct __device_builtin__ short3 +{ + short x, y, z; +}; + +struct __device_builtin__ ushort3 +{ + unsigned short x, y, z; +}; + 
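For readers tracing the __cuda_builtin_vector_align8 macro defined above: outside the 32-bit MSVC special case it simply wraps the listed members in a struct forced to 8-byte alignment. A hedged illustration of what the short4 invocation that follows expands to (illustrative only; the authoritative definition is the macro invocation itself):

/* Approximate expansion of
 *   __cuda_builtin_vector_align8(short4, short x; short y; short z; short w;)
 * on the non-MSVC path: an 8-byte-aligned plain struct, so a short4 can be
 * moved with a single 64-bit load/store on the device. */
struct __device_builtin__ __align__(8) short4
{
    short x; short y; short z; short w;
};

The 32-bit MSVC branch of the macro reaches the same 8-byte alignment without __align__ by placing the members in a union alongside an unnamed long long bit-field.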
+__cuda_builtin_vector_align8(short4, short x; short y; short z; short w;); +__cuda_builtin_vector_align8(ushort4, unsigned short x; unsigned short y; unsigned short z; unsigned short w;); + +struct __device_builtin__ int1 +{ + int x; +}; + +struct __device_builtin__ uint1 +{ + unsigned int x; +}; + +__cuda_builtin_vector_align8(int2, int x; int y;); +__cuda_builtin_vector_align8(uint2, unsigned int x; unsigned int y;); + +struct __device_builtin__ int3 +{ + int x, y, z; +}; + +struct __device_builtin__ uint3 +{ + unsigned int x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) int4 +{ + int x, y, z, w; +}; + +struct __device_builtin__ __builtin_align__(16) uint4 +{ + unsigned int x, y, z, w; +}; + +struct __device_builtin__ long1 +{ + long int x; +}; + +struct __device_builtin__ ulong1 +{ + unsigned long x; +}; + +#if defined(_WIN32) +__cuda_builtin_vector_align8(long2, long int x; long int y;); +__cuda_builtin_vector_align8(ulong2, unsigned long int x; unsigned long int y;); +#else /* !_WIN32 */ + +struct __device_builtin__ __align__(2*sizeof(long int)) long2 +{ + long int x, y; +}; + +struct __device_builtin__ __align__(2*sizeof(unsigned long int)) ulong2 +{ + unsigned long int x, y; +}; + +#endif /* _WIN32 */ + +struct __device_builtin__ long3 +{ + long int x, y, z; +}; + +struct __device_builtin__ ulong3 +{ + unsigned long int x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) long4 +{ + long int x, y, z, w; +}; + +struct __device_builtin__ __builtin_align__(16) ulong4 +{ + unsigned long int x, y, z, w; +}; + +struct __device_builtin__ float1 +{ + float x; +}; + +#if !defined(__CUDACC__) && defined(__arm__) && \ + defined(__ARM_PCS_VFP) && __GNUC__ == 4 && __GNUC_MINOR__ == 6 + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-pedantic" + +struct __device_builtin__ __attribute__((aligned(8))) float2 +{ + float x; float y; float __cuda_gnu_arm_ice_workaround[0]; +}; + +#pragma GCC poison __cuda_gnu_arm_ice_workaround +#pragma GCC diagnostic pop + +#else /* !__CUDACC__ && __arm__ && __ARM_PCS_VFP && + __GNUC__ == 4&& __GNUC_MINOR__ == 6 */ + +__cuda_builtin_vector_align8(float2, float x; float y;); + +#endif /* !__CUDACC__ && __arm__ && __ARM_PCS_VFP && + __GNUC__ == 4&& __GNUC_MINOR__ == 6 */ + +struct __device_builtin__ float3 +{ + float x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) float4 +{ + float x, y, z, w; +}; + +struct __device_builtin__ longlong1 +{ + long long int x; +}; + +struct __device_builtin__ ulonglong1 +{ + unsigned long long int x; +}; + +struct __device_builtin__ __builtin_align__(16) longlong2 +{ + long long int x, y; +}; + +struct __device_builtin__ __builtin_align__(16) ulonglong2 +{ + unsigned long long int x, y; +}; + +struct __device_builtin__ longlong3 +{ + long long int x, y, z; +}; + +struct __device_builtin__ ulonglong3 +{ + unsigned long long int x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) longlong4 +{ + long long int x, y, z ,w; +}; + +struct __device_builtin__ __builtin_align__(16) ulonglong4 +{ + unsigned long long int x, y, z, w; +}; + +struct __device_builtin__ double1 +{ + double x; +}; + +struct __device_builtin__ __builtin_align__(16) double2 +{ + double x, y; +}; + +struct __device_builtin__ double3 +{ + double x, y, z; +}; + +struct __device_builtin__ __builtin_align__(16) double4 +{ + double x, y, z, w; +}; + +#if !defined(__CUDACC__) && defined(_WIN32) && !defined(_WIN64) + +#pragma warning(pop) + +#endif /* !__CUDACC__ && _WIN32 && !_WIN64 */ + 
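Taken together with the make_* constructors declared in vector_functions.h above, these builtin structs behave like ordinary PODs on both host and device. A minimal sketch, assuming a .cu file compiled with nvcc under C++11 or later; the kernel name and sizes are illustrative:

#include <cuda_runtime.h>   // brings in vector_types.h and vector_functions.h

// Sanity checks on the layout promised by the alignment attributes above.
static_assert(sizeof(float4) == 16 && alignof(float4) == 16, "float4: 16 bytes, 16-byte aligned");
static_assert(sizeof(int2)   == 8  && alignof(int2)   == 8,  "int2: 8 bytes, 8-byte aligned");
static_assert(sizeof(float3) == 12,                          "float3: three packed floats");

// The make_* helpers are plain member-wise constructors, usable in
// host and device code alike.
__global__ void saxpy4(float a, const float4 *x, float4 *y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float4 xi = x[i];   // one 16-byte load thanks to the alignment
        float4 yi = y[i];
        y[i] = make_float4(a * xi.x + yi.x, a * xi.y + yi.y,
                           a * xi.z + yi.z, a * xi.w + yi.w);
    }
}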
+/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +typedef __device_builtin__ struct char1 char1; +typedef __device_builtin__ struct uchar1 uchar1; +typedef __device_builtin__ struct char2 char2; +typedef __device_builtin__ struct uchar2 uchar2; +typedef __device_builtin__ struct char3 char3; +typedef __device_builtin__ struct uchar3 uchar3; +typedef __device_builtin__ struct char4 char4; +typedef __device_builtin__ struct uchar4 uchar4; +typedef __device_builtin__ struct short1 short1; +typedef __device_builtin__ struct ushort1 ushort1; +typedef __device_builtin__ struct short2 short2; +typedef __device_builtin__ struct ushort2 ushort2; +typedef __device_builtin__ struct short3 short3; +typedef __device_builtin__ struct ushort3 ushort3; +typedef __device_builtin__ struct short4 short4; +typedef __device_builtin__ struct ushort4 ushort4; +typedef __device_builtin__ struct int1 int1; +typedef __device_builtin__ struct uint1 uint1; +typedef __device_builtin__ struct int2 int2; +typedef __device_builtin__ struct uint2 uint2; +typedef __device_builtin__ struct int3 int3; +typedef __device_builtin__ struct uint3 uint3; +typedef __device_builtin__ struct int4 int4; +typedef __device_builtin__ struct uint4 uint4; +typedef __device_builtin__ struct long1 long1; +typedef __device_builtin__ struct ulong1 ulong1; +typedef __device_builtin__ struct long2 long2; +typedef __device_builtin__ struct ulong2 ulong2; +typedef __device_builtin__ struct long3 long3; +typedef __device_builtin__ struct ulong3 ulong3; +typedef __device_builtin__ struct long4 long4; +typedef __device_builtin__ struct ulong4 ulong4; +typedef __device_builtin__ struct float1 float1; +typedef __device_builtin__ struct float2 float2; +typedef __device_builtin__ struct float3 float3; +typedef __device_builtin__ struct float4 float4; +typedef __device_builtin__ struct longlong1 longlong1; +typedef __device_builtin__ struct ulonglong1 ulonglong1; +typedef __device_builtin__ struct longlong2 longlong2; +typedef __device_builtin__ struct ulonglong2 ulonglong2; +typedef __device_builtin__ struct longlong3 longlong3; +typedef __device_builtin__ struct ulonglong3 ulonglong3; +typedef __device_builtin__ struct longlong4 longlong4; +typedef __device_builtin__ struct ulonglong4 ulonglong4; +typedef __device_builtin__ struct double1 double1; +typedef __device_builtin__ struct double2 double2; +typedef __device_builtin__ struct double3 double3; +typedef __device_builtin__ struct double4 double4; + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +struct __device_builtin__ dim3 +{ + unsigned int x, y, z; +#if defined(__cplusplus) +#if __cplusplus >= 201103L + __host__ __device__ constexpr dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {} + __host__ __device__ constexpr dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {} + __host__ __device__ constexpr operator uint3(void) const { return uint3{x, y, z}; } +#else + __host__ __device__ dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {} + __host__ __device__ dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {} + __host__ __device__ operator uint3(void) const { uint3 t; t.x = x; t.y = y; t.z = z; return t; } +#endif +#endif /* __cplusplus */ +}; + +typedef __device_builtin__ struct 
dim3 dim3; + +#undef __cuda_builtin_vector_align8 + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_VECTOR_TYPES_H__ +#endif + +#endif /* !__VECTOR_TYPES_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e5ecb6f8849503a6fc22e5a8740359caa21a7c7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..b0f41de3b1e87286037ed7d0351057d93287d88f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h @@ -0,0 +1,608 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
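The defaulted dim3 constructor above (unspecified components become 1) is what makes the usual launch-configuration shorthand work. A small sketch, assuming nvcc; the kernel and block size are illustrative:

#include <cuda_runtime.h>

__global__ void fill(float *out, float value, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

void launch_fill(float *d_out, float value, int n)
{
    dim3 block(256);                          // 256 x 1 x 1: y and z default to 1
    dim3 grid((n + block.x - 1) / block.x);   // enough 1-D blocks to cover n
    fill<<<grid, block>>>(d_out, value, n);
}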
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDNN_BACKEND_H_ +#define _CUDNN_BACKEND_H_ + +/* + * The content in this header file is under development to be included in cudnn.h in the future + * Production code should have all include of this header file remove. + */ + +#include "cudnn_ops_infer.h" +#include "cudnn_cnn_infer.h" + +/* NOTE: definition in extern "C" to be copied later to public header */ +#if defined(__cplusplus) +extern "C" { +#endif + +typedef void *cudnnBackendDescriptor_t; + +typedef struct cudnnFractionStruct { + int64_t numerator; + int64_t denominator; +} cudnnFraction_t; + +typedef enum { + CUDNN_POINTWISE_ADD = 0, + CUDNN_POINTWISE_ADD_SQUARE = 5, + CUDNN_POINTWISE_DIV = 6, + CUDNN_POINTWISE_MAX = 3, + CUDNN_POINTWISE_MIN = 2, + CUDNN_POINTWISE_MOD = 7, + CUDNN_POINTWISE_MUL = 1, + CUDNN_POINTWISE_POW = 8, + CUDNN_POINTWISE_SUB = 9, + + CUDNN_POINTWISE_ABS = 10, + CUDNN_POINTWISE_CEIL = 11, + CUDNN_POINTWISE_COS = 12, + CUDNN_POINTWISE_EXP = 13, + CUDNN_POINTWISE_FLOOR = 14, + CUDNN_POINTWISE_LOG = 15, + CUDNN_POINTWISE_NEG = 16, + CUDNN_POINTWISE_RSQRT = 17, + CUDNN_POINTWISE_SIN = 18, + CUDNN_POINTWISE_SQRT = 4, + CUDNN_POINTWISE_TAN = 19, + CUDNN_POINTWISE_ERF = 20, + CUDNN_POINTWISE_IDENTITY = 21, + CUDNN_POINTWISE_RECIPROCAL = 22, + + CUDNN_POINTWISE_RELU_FWD = 100, + CUDNN_POINTWISE_TANH_FWD = 101, + CUDNN_POINTWISE_SIGMOID_FWD = 102, + CUDNN_POINTWISE_ELU_FWD = 103, + CUDNN_POINTWISE_GELU_FWD = 104, + CUDNN_POINTWISE_SOFTPLUS_FWD = 105, + CUDNN_POINTWISE_SWISH_FWD = 106, + CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107, + + CUDNN_POINTWISE_RELU_BWD = 200, + CUDNN_POINTWISE_TANH_BWD = 201, + CUDNN_POINTWISE_SIGMOID_BWD = 202, + CUDNN_POINTWISE_ELU_BWD = 203, + CUDNN_POINTWISE_GELU_BWD = 204, + CUDNN_POINTWISE_SOFTPLUS_BWD = 205, + CUDNN_POINTWISE_SWISH_BWD = 206, + CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207, + + CUDNN_POINTWISE_CMP_EQ = 300, + CUDNN_POINTWISE_CMP_NEQ = 301, + CUDNN_POINTWISE_CMP_GT = 302, + CUDNN_POINTWISE_CMP_GE = 303, + CUDNN_POINTWISE_CMP_LT = 304, + CUDNN_POINTWISE_CMP_LE = 305, + + CUDNN_POINTWISE_LOGICAL_AND = 400, + CUDNN_POINTWISE_LOGICAL_OR = 401, + CUDNN_POINTWISE_LOGICAL_NOT = 402, + + CUDNN_POINTWISE_GEN_INDEX = 501, + + CUDNN_POINTWISE_BINARY_SELECT = 601, +} cudnnPointwiseMode_t; + +typedef enum { + CUDNN_RESAMPLE_NEAREST = 0, + CUDNN_RESAMPLE_BILINEAR = 1, + CUDNN_RESAMPLE_AVGPOOL = 2, + CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2, + CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4, + CUDNN_RESAMPLE_MAXPOOL = 3, +} cudnnResampleMode_t; + +typedef enum { + CUDNN_SIGNAL_SET = 0, + CUDNN_SIGNAL_WAIT = 1, +} cudnnSignalMode_t; + +typedef enum { + CUDNN_GENSTATS_SUM_SQSUM = 0, +} cudnnGenStatsMode_t; + +typedef enum { + CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0, + CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1, +} cudnnBnFinalizeStatsMode_t; + +typedef enum { + CUDNN_RNG_DISTRIBUTION_BERNOULLI, + CUDNN_RNG_DISTRIBUTION_UNIFORM, + CUDNN_RNG_DISTRIBUTION_NORMAL, +} cudnnRngDistribution_t; + +typedef enum { + CUDNN_ATTR_POINTWISE_MODE = 0, + CUDNN_ATTR_POINTWISE_MATH_PREC = 1, + CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3, + CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5, + CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6, + CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7, + 
CUDNN_ATTR_POINTWISE_SWISH_BETA = 8, + CUDNN_ATTR_POINTWISE_AXIS = 9, + + CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100, + CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101, + CUDNN_ATTR_CONVOLUTION_DILATIONS = 102, + CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103, + CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104, + CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105, + CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106, + + CUDNN_ATTR_ENGINEHEUR_MODE = 200, + CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201, + CUDNN_ATTR_ENGINEHEUR_RESULTS = 202, + + CUDNN_ATTR_ENGINECFG_ENGINE = 300, + CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301, + CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302, + + CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400, + CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401, + CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402, + CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403, + CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404, + CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405, + + CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500, + CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503, + + CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600, + CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601, + + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717, + + CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750, + CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751, + CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752, + CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755, + CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756, + CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757, + CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758, + + CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770, + CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771, + CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772, + CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773, + CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774, + + CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780, + CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784, + CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789, + 
CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793, + CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796, + + CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800, + CUDNN_ATTR_OPERATIONGRAPH_OPS = 801, + CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802, + + CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900, + CUDNN_ATTR_TENSOR_DATA_TYPE = 901, + CUDNN_ATTR_TENSOR_DIMENSIONS = 902, + CUDNN_ATTR_TENSOR_STRIDES = 903, + CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904, + CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905, + CUDNN_ATTR_TENSOR_UNIQUE_ID = 906, + CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907, + CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908, + CUDNN_ATTR_TENSOR_REORDERING_MODE = 909, + CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913, + + CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000, + CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001, + CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002, + CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003, + + CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100, + CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101, + + CUDNN_ATTR_KNOB_INFO_TYPE = 1200, + CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201, + CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202, + CUDNN_ATTR_KNOB_INFO_STRIDE = 1203, + + CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300, + CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301, + CUDNN_ATTR_ENGINE_KNOB_INFO = 1302, + CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303, + CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304, + CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305, + + CUDNN_ATTR_MATMUL_COMP_TYPE = 1500, + CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503, + + CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520, + CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521, + CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522, + CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523, + CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527, + + CUDNN_ATTR_REDUCTION_OPERATOR = 1600, + CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601, + + CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610, + CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611, + CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612, + + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630, + + CUDNN_ATTR_RESAMPLE_MODE = 1700, + CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701, + CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702, + CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703, + CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704, + CUDNN_ATTR_RESAMPLE_STRIDES = 1705, + CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706, + CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707, + CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708, + + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711, + 
CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716, + + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727, + + CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800, + CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801, + CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802, + CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803, + + CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900, + CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901, + CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902, + CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903, + CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904, + + CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000, + CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001, + CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002, + CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003, + CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004, + CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005, + CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006, + CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007, + CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012, + CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013, + CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014, + + CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100, + CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101, + CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102, + CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103, + CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104, + CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105, + CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106, + CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107, + CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108, + CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109, + CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110, + + CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200, + CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201, + + CUDNN_ATTR_RNG_DISTRIBUTION = 2300, + CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301, + CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302, + CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303, + CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304, + CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305, + + CUDNN_ATTR_OPERATION_RNG_YDESC = 2310, + CUDNN_ATTR_OPERATION_RNG_SEED = 2311, + CUDNN_ATTR_OPERATION_RNG_DESC = 2312, + CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313, + +} cudnnBackendAttributeName_t; + +typedef enum { + CUDNN_TYPE_HANDLE = 0, + CUDNN_TYPE_DATA_TYPE, + CUDNN_TYPE_BOOLEAN, + CUDNN_TYPE_INT64, + CUDNN_TYPE_FLOAT, + CUDNN_TYPE_DOUBLE, + CUDNN_TYPE_VOID_PTR, + CUDNN_TYPE_CONVOLUTION_MODE, + CUDNN_TYPE_HEUR_MODE, + CUDNN_TYPE_KNOB_TYPE, + CUDNN_TYPE_NAN_PROPOGATION, + CUDNN_TYPE_NUMERICAL_NOTE, + CUDNN_TYPE_LAYOUT_TYPE, + CUDNN_TYPE_ATTRIB_NAME, + CUDNN_TYPE_POINTWISE_MODE, + CUDNN_TYPE_BACKEND_DESCRIPTOR, + CUDNN_TYPE_GENSTATS_MODE, + CUDNN_TYPE_BN_FINALIZE_STATS_MODE, + CUDNN_TYPE_REDUCTION_OPERATOR_TYPE, + CUDNN_TYPE_BEHAVIOR_NOTE, + 
CUDNN_TYPE_TENSOR_REORDERING_MODE, + CUDNN_TYPE_RESAMPLE_MODE, + CUDNN_TYPE_PADDING_MODE, + CUDNN_TYPE_INT32, + CUDNN_TYPE_CHAR, + CUDNN_TYPE_SIGNAL_MODE, + CUDNN_TYPE_FRACTION, + CUDNN_TYPE_NORM_MODE, + CUDNN_TYPE_NORM_FWD_PHASE, + CUDNN_TYPE_RNG_DISTRIBUTION +} cudnnBackendAttributeType_t; + +typedef enum { + CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0, + CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR, + CUDNN_BACKEND_ENGINE_DESCRIPTOR, + CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, + CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, + CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, + CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR, + CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, + CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, + CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR, + CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, + CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, + CUDNN_BACKEND_TENSOR_DESCRIPTOR, + CUDNN_BACKEND_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR, + CUDNN_BACKEND_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR, + CUDNN_BACKEND_RESAMPLE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR, + CUDNN_BACKEND_RNG_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR +} cudnnBackendDescriptorType_t; + +typedef enum { + CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0, + CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS, + CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION, + CUDNN_NUMERICAL_NOTE_FFT, + CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC, + CUDNN_NUMERICAL_NOTE_WINOGRAD, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13, + CUDNN_NUMERICAL_NOTE_TYPE_COUNT, +} cudnnBackendNumericalNote_t; + +typedef enum { + CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0, + CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1, + CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2, + CUDNN_BEHAVIOR_NOTE_TYPE_COUNT, +} cudnnBackendBehaviorNote_t; + +typedef enum { + CUDNN_KNOB_TYPE_SPLIT_K = 0, + CUDNN_KNOB_TYPE_SWIZZLE = 1, + CUDNN_KNOB_TYPE_TILE_SIZE = 2, + CUDNN_KNOB_TYPE_USE_TEX = 3, + CUDNN_KNOB_TYPE_EDGE = 4, + CUDNN_KNOB_TYPE_KBLOCK = 5, + CUDNN_KNOB_TYPE_LDGA = 6, + CUDNN_KNOB_TYPE_LDGB = 7, + CUDNN_KNOB_TYPE_CHUNK_K = 8, + CUDNN_KNOB_TYPE_SPLIT_H = 9, + CUDNN_KNOB_TYPE_WINO_TILE = 10, + CUDNN_KNOB_TYPE_MULTIPLY = 11, + CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12, + CUDNN_KNOB_TYPE_TILEK = 13, + CUDNN_KNOB_TYPE_STAGES = 14, + CUDNN_KNOB_TYPE_REDUCTION_MODE = 15, + CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16, + CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17, + CUDNN_KNOB_TYPE_IDX_MODE = 18, + CUDNN_KNOB_TYPE_SLICED = 19, + CUDNN_KNOB_TYPE_SPLIT_RS = 20, + CUDNN_KNOB_TYPE_SINGLEBUFFER = 21, + CUDNN_KNOB_TYPE_LDGC = 22, + CUDNN_KNOB_TYPE_SPECFILT = 23, + CUDNN_KNOB_TYPE_KERNEL_CFG = 24, + CUDNN_KNOB_TYPE_WORKSPACE = 25, + CUDNN_KNOB_TYPE_TILE_CGA = 26, + CUDNN_KNOB_TYPE_TILE_CGA_M = 27, + CUDNN_KNOB_TYPE_TILE_CGA_N = 28, + 
CUDNN_KNOB_TYPE_BLOCK_SIZE = 29, + CUDNN_KNOB_TYPE_OCCUPANCY = 30, + CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31, + CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32, + CUDNN_KNOB_TYPE_COUNTS, +} cudnnBackendKnobType_t; + +typedef enum { + CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0, + CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3, + CUDNN_LAYOUT_TYPE_COUNT = 4, +} cudnnBackendLayoutType_t; + +typedef enum { + CUDNN_HEUR_MODE_INSTANT = 0, + CUDNN_HEUR_MODE_B = 1, + CUDNN_HEUR_MODE_FALLBACK = 2, + CUDNN_HEUR_MODE_A = 3, + CUDNN_HEUR_MODES_COUNT = 4, +} cudnnBackendHeurMode_t; + +typedef enum { + CUDNN_TENSOR_REORDERING_NONE = 0, + CUDNN_TENSOR_REORDERING_INT8x32 = 1, + CUDNN_TENSOR_REORDERING_F16x16 = 2, +} cudnnBackendTensorReordering_t; + +typedef enum { + CUDNN_ZERO_PAD = 0, + CUDNN_NEG_INF_PAD = 1, + CUDNN_EDGE_VAL_PAD = 2, +} cudnnPaddingMode_t; + +typedef enum { + CUDNN_LAYER_NORM = 0, + CUDNN_INSTANCE_NORM = 1, + CUDNN_BATCH_NORM = 2, + CUDNN_GROUP_NORM = 3, +} cudnnBackendNormMode_t; + +typedef enum { + CUDNN_NORM_FWD_INFERENCE = 0, + CUDNN_NORM_FWD_TRAINING = 1, +} cudnnBackendNormFwdPhase_t; + +cudnnStatus_t CUDNNWINAPI +cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t elementCount, + const void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t requestedElementCount, + int64_t *elementCount, + void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack); + +#if defined(__cplusplus) +} +#endif + +#endif /* _CUDNN_BACKEND_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..ee0358b51d8b2c48880cf2f3cde7adf83c112336 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h @@ -0,0 +1,219 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
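The backend ("graph") API declared in cudnn_backend_v8.h above is attribute-driven: every object is an opaque cudnnBackendDescriptor_t that is created for a given descriptor type, populated with cudnnBackendSetAttribute, and only validated at cudnnBackendFinalize. A minimal sketch for one packed NCHW float tensor descriptor; the shape, strides, UID and alignment values are illustrative and error handling is omitted:

#include <stdint.h>
#include <cudnn.h>

/* Describe one packed NCHW float tensor to the backend API. */
static void build_tensor_descriptor(cudnnBackendDescriptor_t *out)
{
    cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, out);

    cudnnDataType_t dtype = CUDNN_DATA_FLOAT;
    int64_t dims[]    = {8, 64, 56, 56};                /* N, C, H, W        */
    int64_t strides[] = {64 * 56 * 56, 56 * 56, 56, 1}; /* fully packed NCHW */
    int64_t uid = 1, alignment = 16;

    /* Every attribute is set as (name, element type, element count, data). */
    cudnnBackendSetAttribute(*out, CUDNN_ATTR_TENSOR_DATA_TYPE,
                             CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(*out, CUDNN_ATTR_TENSOR_DIMENSIONS,
                             CUDNN_TYPE_INT64, 4, dims);
    cudnnBackendSetAttribute(*out, CUDNN_ATTR_TENSOR_STRIDES,
                             CUDNN_TYPE_INT64, 4, strides);
    cudnnBackendSetAttribute(*out, CUDNN_ATTR_TENSOR_UNIQUE_ID,
                             CUDNN_TYPE_INT64, 1, &uid);
    cudnnBackendSetAttribute(*out, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT,
                             CUDNN_TYPE_INT64, 1, &alignment);

    /* Validation of the accumulated attributes happens here, not at set time. */
    cudnnBackendFinalize(*out);
}

Operation, operation-graph, engine-config and execution-plan descriptors are assembled the same way; a finished plan is run with cudnnBackendExecute(handle, plan, variantPack), and descriptors are released with cudnnBackendDestroyDescriptor.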
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions. + */ + +#pragma once +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_cnn_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_TRAIN_MAJOR 8 +#define CUDNN_CNN_TRAIN_MINOR 9 +#define CUDNN_CNN_TRAIN_PATCH 2 + +#if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!! 
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* helper function to provide the convolution backward filter algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct { + cudnnConvolutionBwdFilterAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdFilterAlgoPerf_t; + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *y, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardFilter(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnFilterDescriptor_t dwDesc, + void *dw); + +/* Function to compute the bias gradient for batch convolution */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardBias(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dbDesc, + void *db); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + const void *param); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t 
constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + void *param, + int *isNULL); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan); + +cudnnStatus_t CUDNNWINAPI +cudnnMakeFusedOpsPlan(cudnnHandle_t handle, + cudnnFusedOpsPlan_t plan, + const cudnnFusedOpsConstParamPack_t constPack, + size_t *workspaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack); + +cudnnStatus_t CUDNNWINAPI +cudnnCnnTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..79ba34cc1a1557462d49b63a9cb52d9bfe149693 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h @@ -0,0 +1,1183 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. 
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_infer : cuDNN's basic definitions and inference operations. + */ + +#if !defined(CUDNN_OPS_INFER_H_) +#define CUDNN_OPS_INFER_H_ + +#include +#include + +#include "cudnn_version.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_INFER_MAJOR 8 +#define CUDNN_OPS_INFER_MINOR 9 +#define CUDNN_OPS_INFER_PATCH 2 + +#if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS INFER!!! +#endif + +#ifndef CUDNNWINAPI +#ifdef _WIN32 +#define CUDNNWINAPI __stdcall +#else +#define CUDNNWINAPI +#endif +#endif + +/* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */ +#if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__)) +/* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */ +#define CUDNN_DEPRECATED __attribute__((deprecated)) +#elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER) +/* Microsoft Visual C++ */ +#define CUDNN_DEPRECATED __declspec(deprecated) +#elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L) +/* C++14 compilers */ +#define CUDNN_DEPRECATED [[deprecated]] +#else +/* No support for the deprecated attribute */ +#define CUDNN_DEPRECATED +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudnnContext; +typedef struct cudnnContext *cudnnHandle_t; + +size_t CUDNNWINAPI +cudnnGetVersion(void); + +size_t CUDNNWINAPI +cudnnGetMaxDeviceVersion(void); + +/* Returns CUDA Runtime version statically linked against cudnn */ +size_t CUDNNWINAPI +cudnnGetCudartVersion(void); + +/* + * CUDNN return codes + */ +typedef enum { + CUDNN_STATUS_SUCCESS = 0, + CUDNN_STATUS_NOT_INITIALIZED = 1, + CUDNN_STATUS_ALLOC_FAILED = 2, + CUDNN_STATUS_BAD_PARAM = 3, + CUDNN_STATUS_INTERNAL_ERROR = 4, + CUDNN_STATUS_INVALID_VALUE = 5, + CUDNN_STATUS_ARCH_MISMATCH = 6, + CUDNN_STATUS_MAPPING_ERROR = 7, + CUDNN_STATUS_EXECUTION_FAILED = 8, + CUDNN_STATUS_NOT_SUPPORTED = 9, + CUDNN_STATUS_LICENSE_ERROR = 10, + CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11, + CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12, + CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13, + CUDNN_STATUS_VERSION_MISMATCH = 14, +} cudnnStatus_t; + +/* human-readable error messages */ +const char *CUDNNWINAPI +cudnnGetErrorString(cudnnStatus_t status); + +/* Forward definition in this version only */ +typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t; + +typedef enum { + CUDNN_ERRQUERY_RAWCODE = 0, + CUDNN_ERRQUERY_NONBLOCKING = 1, + CUDNN_ERRQUERY_BLOCKING = 2, +} cudnnErrQueryMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t 
*tag); + +#ifndef __LIBRARY_TYPES_H__ + +typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType; + +#endif + +cudnnStatus_t CUDNNWINAPI +cudnnGetProperty(libraryPropertyType type, int *value); + +cudnnStatus_t CUDNNWINAPI +cudnnCreate(cudnnHandle_t *handle); +cudnnStatus_t CUDNNWINAPI +cudnnDestroy(cudnnHandle_t handle); +cudnnStatus_t CUDNNWINAPI +cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId); +cudnnStatus_t CUDNNWINAPI +cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId); + +/* Data structures to represent Image/Filter and the Neural Network Layer */ +typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t; +typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t; +typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t; +typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t; +typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t; +typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t; +typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t; +typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t; +typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t; +typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t; +/* + * CUDNN data type + */ +typedef enum { + CUDNN_DATA_FLOAT = 0, + CUDNN_DATA_DOUBLE = 1, + CUDNN_DATA_HALF = 2, + CUDNN_DATA_INT8 = 3, + CUDNN_DATA_INT32 = 4, + CUDNN_DATA_INT8x4 = 5, + CUDNN_DATA_UINT8 = 6, + CUDNN_DATA_UINT8x4 = 7, + CUDNN_DATA_INT8x32 = 8, + CUDNN_DATA_BFLOAT16 = 9, + CUDNN_DATA_INT64 = 10, + CUDNN_DATA_BOOLEAN = 11, + CUDNN_DATA_FP8_E4M3 = 12, + CUDNN_DATA_FP8_E5M2 = 13, + CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14, +} cudnnDataType_t; + +/* + * CUDNN math type + */ +typedef enum { + CUDNN_DEFAULT_MATH = 0, + CUDNN_TENSOR_OP_MATH = 1, + CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2, + CUDNN_FMA_MATH = 3, +} cudnnMathType_t; + +/* + * CUDNN propagate Nan + */ +typedef enum { + CUDNN_NOT_PROPAGATE_NAN = 0, + CUDNN_PROPAGATE_NAN = 1, +} cudnnNanPropagation_t; + +/* + * CUDNN Determinism + */ +typedef enum { + CUDNN_NON_DETERMINISTIC = 0, + CUDNN_DETERMINISTIC = 1, +} cudnnDeterminism_t; + +/* Maximum supported number of tensor dimensions */ +#define CUDNN_DIM_MAX 8 + +/* Create an instance of a generic Tensor descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc); + +typedef enum { + CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */ + CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/ + CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */ +} cudnnTensorFormat_t; + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w); /* width of input section */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w, /* width of input section */ + int nStride, + int cStride, + int hStride, + int wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t 
*dataType, /* image data type */ + int *n, /* number of inputs (batch size) */ + int *c, /* number of input feature maps */ + int *h, /* height of input section */ + int *w, /* width of input section */ + int *nStride, + int *cStride, + int *hStride, + int *wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, + int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, + int *nbDims, + int dimA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size); + +/* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride + + 1)Example of all images in row major order one batch of features after the other (with an optional padding on row) + input_stride : c x h x h_stride + feature_stride : h x h_stride + h_stride : >= w ( h_stride = w if no padding) + w_stride : 1 + + + 2)Example of all images in row major with features maps interleaved + input_stride : c x h x h_stride + feature_stride : 1 + h_stride : w x c + w_stride : c + + 3)Example of all images in column major order one batch of features after the other (with optional padding on column) + input_stride : c x w x w_stride + feature_stride : w x w_stride + h_stride : 1 + w_stride : >= h + +*/ + +/* Destroy an instance of Tensor4d descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc); + +/* Fold/unfold transforms */ +typedef enum { + CUDNN_TRANSFORM_FOLD = 0U, + CUDNN_TRANSFORM_UNFOLD = 1U, +} cudnnFoldingDirection_t; + +/** Create a destination descriptor for cudnnTransformTensor */ +cudnnStatus_t CUDNNWINAPI +cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc, + const cudnnTensorDescriptor_t srcDesc, + cudnnTensorDescriptor_t destDesc, + size_t *destSizeInBytes); + +/** Create an empty tensor transform descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc); + +/** Initialize a previously created tensor transform descriptor. */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + const uint32_t nbDims, + const cudnnTensorFormat_t destFormat, + const int32_t padBeforeA[], + const int32_t padAfterA[], + const uint32_t foldA[], + const cudnnFoldingDirection_t direction); + +/** + * Retrieves the values stored in a previously initialized tensor transform + * descriptor. + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + uint32_t nbDimsRequested, + cudnnTensorFormat_t *destFormat, + int32_t padBeforeA[], + int32_t padAfterA[], + uint32_t foldA[], + cudnnFoldingDirection_t *direction); + +/** + * Destroys a previously created tensor transform descriptor. 
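As a quick orientation to the tensor-descriptor API declared above, the following sketch (not part of the header) builds a packed NCHW float descriptor and queries its byte size; the 8x3x224x224 shape is an arbitrary assumption and error checking is omitted.

#include <cudnn.h>

void describe_tensor(void)
{
    cudnnTensorDescriptor_t desc;
    size_t bytes = 0;

    cudnnCreateTensorDescriptor(&desc);
    cudnnSetTensor4dDescriptor(desc,
                               CUDNN_TENSOR_NCHW,   /* packed row-major layout */
                               CUDNN_DATA_FLOAT,    /* element type */
                               8, 3, 224, 224);     /* n, c, h, w (assumed shape) */
    cudnnGetTensorSizeInBytes(desc, &bytes);         /* n*c*h*w*sizeof(float) when packed */
    cudnnDestroyTensorDescriptor(desc);
}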
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc); + +/* Tensor layout conversion helper (y = alpha * x + beta * y) */ +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensorEx(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnTensorDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnTensorDescriptor_t destDesc, + void *destData); + +/* Tensor Bias addition : C = alpha * A + beta * C */ +cudnnStatus_t CUDNNWINAPI +cudnnAddTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN OpTensor op type + */ +typedef enum { + CUDNN_OP_TENSOR_ADD = 0, + CUDNN_OP_TENSOR_MUL = 1, + CUDNN_OP_TENSOR_MIN = 2, + CUDNN_OP_TENSOR_MAX = 3, + CUDNN_OP_TENSOR_SQRT = 4, + CUDNN_OP_TENSOR_NOT = 5, +} cudnnOpTensorOp_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t opTensorOp, + cudnnDataType_t opTensorCompType, + cudnnNanPropagation_t opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t *opTensorOp, + cudnnDataType_t *opTensorCompType, + cudnnNanPropagation_t *opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc); + +/* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */ +/* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */ +cudnnStatus_t CUDNNWINAPI +cudnnOpTensor(cudnnHandle_t handle, + const cudnnOpTensorDescriptor_t opTensorDesc, + const void *alpha1, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *alpha2, + const cudnnTensorDescriptor_t bDesc, + const void *B, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN ReduceTensor op type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_ADD = 0, + CUDNN_REDUCE_TENSOR_MUL = 1, + CUDNN_REDUCE_TENSOR_MIN = 2, + CUDNN_REDUCE_TENSOR_MAX = 3, + CUDNN_REDUCE_TENSOR_AMAX = 4, + CUDNN_REDUCE_TENSOR_AVG = 5, + CUDNN_REDUCE_TENSOR_NORM1 = 6, + CUDNN_REDUCE_TENSOR_NORM2 = 7, + CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8, +} cudnnReduceTensorOp_t; + +/* + * CUDNN ReduceTensor indices type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_NO_INDICES = 0, + CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1, +} cudnnReduceTensorIndices_t; + +/* + * CUDNN tensor indices type size (all unsigned) + * Currently not supported, default is 32 bit unsigned. 
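The OpTensor API above evaluates C = op(alpha1*A, alpha2*B) + beta*C. A hedged sketch of an elementwise add, assuming the three descriptors and device buffers are created elsewhere with matching shapes (error checking omitted):

#include <cudnn.h>

void add_tensors(cudnnHandle_t handle,
                 cudnnTensorDescriptor_t aDesc, const void *dA,
                 cudnnTensorDescriptor_t bDesc, const void *dB,
                 cudnnTensorDescriptor_t cDesc, void *dC)
{
    cudnnOpTensorDescriptor_t op;
    const float one = 1.0f, zero = 0.0f;

    cudnnCreateOpTensorDescriptor(&op);
    cudnnSetOpTensorDescriptor(op, CUDNN_OP_TENSOR_ADD,
                               CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN);
    /* C = op(alpha1*A, alpha2*B) + beta*C  with alpha1 = alpha2 = 1, beta = 0 */
    cudnnOpTensor(handle, op, &one, aDesc, dA, &one, bDesc, dB, &zero, cDesc, dC);
    cudnnDestroyOpTensorDescriptor(op);
}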
+ */ +typedef enum { + CUDNN_32BIT_INDICES = 0, + CUDNN_64BIT_INDICES = 1, + CUDNN_16BIT_INDICES = 2, + CUDNN_8BIT_INDICES = 3, +} cudnnIndicesType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t reduceTensorOp, + cudnnDataType_t reduceTensorCompType, + cudnnNanPropagation_t reduceTensorNanOpt, + cudnnReduceTensorIndices_t reduceTensorIndices, + cudnnIndicesType_t reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t *reduceTensorOp, + cudnnDataType_t *reduceTensorCompType, + cudnnNanPropagation_t *reduceTensorNanOpt, + cudnnReduceTensorIndices_t *reduceTensorIndices, + cudnnIndicesType_t *reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc); + +/* Helper function to return the minimum size of the index space to be passed to the reduction given the input and + * output tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionIndicesSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output + * tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionWorkspaceSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Tensor operation : C = reduce op( alpha * A ) + beta * C */ +/* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */ +/* The indices space is ignored for reduce ops other than min or max. 
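The two size helpers above pair with the cudnnReduceTensor call declared just below. A sketch, assuming aDesc (input) and cDesc (reduced output) are already configured, of querying the scratch space needed for a mean reduction (error checking omitted):

#include <cudnn.h>

void query_reduction_scratch(cudnnHandle_t handle,
                             cudnnTensorDescriptor_t aDesc,
                             cudnnTensorDescriptor_t cDesc,
                             size_t *workspaceBytes,
                             size_t *indicesBytes)
{
    cudnnReduceTensorDescriptor_t red;

    cudnnCreateReduceTensorDescriptor(&red);
    cudnnSetReduceTensorDescriptor(red,
                                   CUDNN_REDUCE_TENSOR_AVG,        /* mean over the reduced dims */
                                   CUDNN_DATA_FLOAT,               /* compute type */
                                   CUDNN_NOT_PROPAGATE_NAN,
                                   CUDNN_REDUCE_TENSOR_NO_INDICES, /* indices only matter for min/max */
                                   CUDNN_32BIT_INDICES);
    cudnnGetReductionWorkspaceSize(handle, red, aDesc, cDesc, workspaceBytes);
    cudnnGetReductionIndicesSize(handle, red, aDesc, cDesc, indicesBytes);
    cudnnDestroyReduceTensorDescriptor(red);
}

The returned sizes tell the caller how much device memory to allocate before invoking cudnnReduceTensor itself.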
*/ +cudnnStatus_t CUDNNWINAPI +cudnnReduceTensor(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + void *indices, + size_t indicesSizeInBytes, + void *workspace, + size_t workspaceSizeInBytes, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* Set all values of a tensor to a given value : y[i] = value[0] */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr); + +/* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */ +cudnnStatus_t CUDNNWINAPI +cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha); + +/* Create an instance of FilterStruct */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int k, /* number of output feature maps */ + int c, /* number of input feature maps */ + int h, /* height of each input filter */ + int w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *k, /* number of output feature maps */ + int *c, /* number of input feature maps */ + int *h, /* height of each input filter */ + int *w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int nbDims, + const int filterDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *nbDims, + int filterDimA[]); +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformFilter(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnFilterDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnFilterDescriptor_t destDesc, + void *destData); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc); + +/* + * softmax algorithm + */ +typedef enum { + CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */ + CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */ + CUDNN_SOFTMAX_LOG = 2 +} cudnnSoftmaxAlgorithm_t; + +typedef enum { + CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */ + CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */ +} cudnnSoftmaxMode_t; + +/* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxForward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * pooling mode + */ +typedef enum { + CUDNN_POOLING_MAX = 0, + 
CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */ + CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */ + CUDNN_POOLING_MAX_DETERMINISTIC = 3 +} cudnnPoolingMode_t; + +/* Create an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t mode, + cudnnNanPropagation_t maxpoolingNanOpt, + int windowHeight, + int windowWidth, + int verticalPadding, + int horizontalPadding, + int verticalStride, + int horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *windowHeight, + int *windowWidth, + int *verticalPadding, + int *horizontalPadding, + int *verticalStride, + int *horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc, + const cudnnPoolingMode_t mode, + const cudnnNanPropagation_t maxpoolingNanOpt, + int nbDims, + const int windowDimA[], + const int paddingA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + int nbDimsRequested, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *nbDims, + int windowDimA[], + int paddingA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int nbDims, + int outputTensorDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int *n, + int *c, + int *h, + int *w); + +/* Destroy an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc); + +/* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingForward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * activation mode + */ +typedef enum { + CUDNN_ACTIVATION_SIGMOID = 0, + CUDNN_ACTIVATION_RELU = 1, + CUDNN_ACTIVATION_TANH = 2, + CUDNN_ACTIVATION_CLIPPED_RELU = 3, + CUDNN_ACTIVATION_ELU = 4, + CUDNN_ACTIVATION_IDENTITY = 5, + CUDNN_ACTIVATION_SWISH = 6 +} cudnnActivationMode_t; + +/* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t mode, + cudnnNanPropagation_t reluNanOpt, + double coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t *mode, + cudnnNanPropagation_t *reluNanOpt, + double *coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t 
activationDesc, double swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc); + +/* Function to perform forward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationForward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * Create an instance of LRN (Local Response Normalization) descriptor + * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper + */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc); + +#define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */ +#define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */ +#define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */ +#define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */ + +/* LRN layer mode */ +typedef enum { + CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */ +} cudnnLRNMode_t; + +/* + * Uses a window [center-lookBehind, center+lookAhead], where + * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1. + * Values of double parameters cast to tensor data type. + */ +cudnnStatus_t CUDNNWINAPI +cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK); +/* + * Retrieve the settings currently stored in an LRN layer descriptor + * Any of the provided pointers can be NULL (no corresponding value will be returned) + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK); + +/* Destroy an instance of LRN descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc); + +/* LRN functions: output = alpha * normalize(x) + beta * old_y */ + +/* LRN cross-channel forward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0, +} cudnnDivNormMode_t; + +/* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */ +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_BATCHNORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_BATCHNORM_SPATIAL = 1, + + /* + * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors). 
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values + */ + CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2, +} cudnnBatchNormMode_t; + +#define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */ + +/* + * Derives a tensor descriptor from layer data descriptor for BatchNormalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnBatchNormMode_t mode); + +typedef enum { + CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */ + CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */ + CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */ +} cudnnBatchNormOps_t; + +/* + * Performs Batch Normalization during Inference: + * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k] + * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed + * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining + * above for notes on function arguments. + */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardInference(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + const void *estimatedMean, + const void *estimatedVariance, + double epsilon); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_NORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_NORM_PER_CHANNEL = 1, +} cudnnNormMode_t; + +typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t; + +/* + * Derives a tensor descriptor from layer data descriptor for Normalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc, + cudnnTensorDescriptor_t derivedNormMeanVarDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnNormMode_t mode, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +typedef enum { + CUDNN_NORM_OPS_NORM = 0, /* do normalization only */ + CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */ + CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */ +} cudnnNormOps_t; + +/* + * Performs Normalization during Inference: + * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k] + * with normScale, normBias, runningMean, runningInvVariance tensors indexed + * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining + * above for notes on function arguments. 
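A sketch of inference-time batch normalization with the API above. The x/y descriptors and every device pointer are assumed to be prepared elsewhere; spatial mode and epsilon = 1e-5 are arbitrary but legal choices (epsilon must not fall below CUDNN_BN_MIN_EPSILON), and error checking is omitted.

#include <cudnn.h>

void bn_inference(cudnnHandle_t handle,
                  cudnnTensorDescriptor_t xDesc, const void *dX,
                  cudnnTensorDescriptor_t yDesc, void *dY,
                  const void *bnScale, const void *bnBias,
                  const void *estMean, const void *estVar)
{
    cudnnTensorDescriptor_t bnDesc;
    const float one = 1.0f, zero = 0.0f;   /* alpha, beta blend factors */
    const double epsilon = 1e-5;           /* assumed; must be >= CUDNN_BN_MIN_EPSILON */

    cudnnCreateTensorDescriptor(&bnDesc);
    /* Derive the 1xCx1x1 descriptor shared by scale/bias/mean/variance. */
    cudnnDeriveBNTensorDescriptor(bnDesc, xDesc, CUDNN_BATCHNORM_SPATIAL);

    cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
                                            &one, &zero,
                                            xDesc, dX, yDesc, dY,
                                            bnDesc, bnScale, bnBias,
                                            estMean, estVar, epsilon);
    cudnnDestroyTensorDescriptor(bnDesc);
}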
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardInference(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + const cudnnTensorDescriptor_t normMeanVarDesc, + const void *estimatedMean, + const void *estimatedVariance, + const cudnnTensorDescriptor_t zDesc, + const void *z, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + double epsilon, + int groupCnt); /* Place hold for future work*/ + +/* APIs for spatial transformer network*/ +typedef enum { + CUDNN_SAMPLER_BILINEAR = 0, +} cudnnSamplerType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc, + cudnnSamplerType_t samplerType, + cudnnDataType_t dataType, + const int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *theta, + void *grid); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerForward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *grid, + const void *beta, + cudnnTensorDescriptor_t yDesc, + void *y); + +typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc); + +/*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes); + +/*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +/* Restores the dropout descriptor to a previously saved-off state */ +cudnnStatus_t CUDNNWINAPI +cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +cudnnStatus_t CUDNNWINAPI +cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float *dropout, + void **states, + unsigned long long *seed); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutForward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t xdesc, + const void *x, + const cudnnTensorDescriptor_t ydesc, + void *y, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* TODO: remove */ + +typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t; +typedef 
struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t; + +/* TODO: move these enums out to the appropriate submodule */ +typedef enum { + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0, + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1, + CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2, + CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3, + CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4, + CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7, + CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8 +} cudnnConvolutionFwdAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7 +} cudnnConvolutionBwdFilterAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6 +} cudnnConvolutionBwdDataAlgo_t; + +typedef enum { + CUDNN_RNN_ALGO_STANDARD = 0, + CUDNN_RNN_ALGO_PERSIST_STATIC = 1, + CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2, + CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3, + CUDNN_RNN_ALGO_COUNT = 4, +} cudnnRNNAlgo_t; + +typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t; + +/* TODO: remove */ +typedef struct cudnnAlgorithmUnionStruct { + union Algorithm { + cudnnConvolutionFwdAlgo_t convFwdAlgo; + cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo; + cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo; + cudnnRNNAlgo_t RNNAlgo; + cudnnCTCLossAlgo_t CTCLossAlgo; + } algo; +} cudnnAlgorithm_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t algoDesc, + cudnnStatus_t status, + float time, + size_t memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t *algoDesc, + cudnnStatus_t *status, + float *time, + size_t *memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy); + +CUDNN_DEPRECATED 
cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSaveAlgorithm(cudnnHandle_t handle, + cudnnAlgorithmDescriptor_t algoDesc, + void *algoSpace, + size_t algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRestoreAlgorithm(cudnnHandle_t handle, + void *algoSpace, + size_t algoSpaceSizeInBytes, + cudnnAlgorithmDescriptor_t algoDesc); + +typedef enum { + CUDNN_SEV_FATAL = 0, + CUDNN_SEV_ERROR = 1, + CUDNN_SEV_WARNING = 2, + CUDNN_SEV_INFO = 3, +} cudnnSeverity_t; + +/* Message masks to be used with cudnnSetCallback() */ +#define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR) +#define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING) +#define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO) + +/* struct containing useful informaiton for each API call */ +typedef struct cudnnDebugStruct { + unsigned cudnn_version; + cudnnStatus_t cudnnStatus; + unsigned time_sec; /* epoch time in seconds */ + unsigned time_usec; /* microseconds part of epoch time */ + unsigned time_delta; /* time since start in seconds */ + cudnnHandle_t handle; /* cudnn handle */ + cudaStream_t stream; /* cuda stream ID */ + unsigned long long pid; /* process ID */ + unsigned long long tid; /* thread ID */ + int cudaDeviceId; /* CUDA device ID */ + int reserved[15]; /* reserved for future use */ +} cudnnDebug_t; + +typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. 
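A sketch of hooking the logging callback declared above; routing messages to stderr and the particular severity mask are illustrative choices, not prescribed by the header.

#include <stdio.h>
#include <cudnn.h>

static void log_to_stderr(cudnnSeverity_t sev, void *udata,
                          const cudnnDebug_t *dbg, const char *msg)
{
    (void)udata;
    fprintf(stderr, "[cudnn %u][sev %d] %s\n",
            dbg ? dbg->cudnn_version : 0u, (int)sev, msg ? msg : "");
}

void enable_cudnn_logging(void)
{
    /* Errors and warnings only; OR in CUDNN_SEV_INFO_EN for info messages. */
    cudnnSetCallback(CUDNN_SEV_ERROR_EN | CUDNN_SEV_WARNING_EN,
                     NULL /* udata */, log_to_stderr);
}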
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_INFER_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/__init__.py b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec63500b14c35b9683ef85c71919c2df551b5467 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..e93ea0e9530b4537f02974fb16bd40b003984e03 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 differ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..3160b08a47fd6250449d080b1fd05b41b3e947c0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50c02f961775d3aca2676369156435d964b5615719f8b44b49cd73edc21217f3 +size 125141304 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..f07a406d06ec5bb1e21ab8d2054c095139eb04e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c8180a1232b79bd9820f3d3bee72ae5d30daddd46db30922a76fa13ac7e6fdf +size 116248176 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..813bd94bcd33be0c73a1c40ae73ee3d19bf8170f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0768f3ee319e217e961baa1b4084a7d52900f0881c0364c371308bb1f58a226 +size 132457288 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..dab143352b51ddc4970ed8b37139649454c66d85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e520be70d6ab22e5ac565049bdb7b1582483703b641c440a7c21e92e12715eae +size 90621456 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 
b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..b10618e3677ad5bd911f7bd355a419eaec168495 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c89d9c871d0f1d1b3d1037234a66ebb09f990e3d5324c5025d45835d7d82d462 +size 70922856 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/__init__.py b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..050024f59d779dd3e37227c2a16d3be154513749 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/__init__.py b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b27f4bcc9aa75d18d64fe7615d2fa1bee7356734 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h new file mode 100644 index 0000000000000000000000000000000000000000..bc4be77a6eea692dbd06d814d0c21c80d7eaeff2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h @@ -0,0 +1,522 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#ifndef nvJitLink_INCLUDED +#define nvJitLink_INCLUDED + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/** + * + * \defgroup error Error codes + * + */ + +/** \ingroup error + * + * \brief The enumerated type nvJitLinkResult defines API call result codes. + * nvJitLink APIs return nvJitLinkResult codes to indicate the result. 
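For diagnostics it can help to map the result codes enumerated immediately below to short strings. The helper is illustrative only and handles just the unconditional codes, since the NEW_ERROR_CODES names may alias existing values:

#include <nvJitLink.h>

static const char *nvjitlink_result_str(nvJitLinkResult r)
{
    switch (r) {
    case NVJITLINK_SUCCESS:                   return "success";
    case NVJITLINK_ERROR_UNRECOGNIZED_OPTION: return "unrecognized option";
    case NVJITLINK_ERROR_MISSING_ARCH:        return "-arch=sm_NN not specified";
    case NVJITLINK_ERROR_INVALID_INPUT:       return "invalid input";
    case NVJITLINK_ERROR_PTX_COMPILE:         return "PTX compilation failed";
    case NVJITLINK_ERROR_NVVM_COMPILE:        return "NVVM compilation failed";
    case NVJITLINK_ERROR_INTERNAL:            return "internal error";
    case NVJITLINK_ERROR_THREADPOOL:          return "threadpool error";
    case NVJITLINK_ERROR_UNRECOGNIZED_INPUT:  return "unrecognized input";
    default:                                  return "unknown nvJitLinkResult";
    }
}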
+ */ + +typedef enum { + NVJITLINK_SUCCESS = 0, + NVJITLINK_ERROR_UNRECOGNIZED_OPTION, + NVJITLINK_ERROR_MISSING_ARCH, // -arch=sm_NN option not specified + NVJITLINK_ERROR_INVALID_INPUT, + NVJITLINK_ERROR_PTX_COMPILE, + NVJITLINK_ERROR_NVVM_COMPILE, + NVJITLINK_ERROR_INTERNAL, + NVJITLINK_ERROR_THREADPOOL, + NVJITLINK_ERROR_UNRECOGNIZED_INPUT, +#ifdef NEW_ERROR_CODES // These error codes will appear in a future CUDA release. + NVJITLINK_ERROR_NULL_INPUT, + NVJITLINK_ERROR_INCOMPATIBLE_OPTIONS, + NVJITLINK_ERROR_INCORRECT_INPUT_TYPE, + NVJITLINK_ERROR_ARCH_MISMATCH, + NVJITLINK_ERROR_OUTDATED_LIBRARY, + NVJITLINK_ERROR_MISSING_FATBIN +#endif +} nvJitLinkResult; + +#ifndef NEW_ERROR_CODES // To avoid breaking compatibility, we map them to existing error codes for now. +#define NVJITLINK_ERROR_NULL_INPUT NVJITLINK_ERROR_INVALID_INPUT +#define NVJITLINK_ERROR_INCOMPATIBLE_OPTIONS NVJITLINK_ERROR_INVALID_INPUT +#define NVJITLINK_ERROR_INCORRECT_INPUT_TYPE NVJITLINK_ERROR_INVALID_INPUT +#define NVJITLINK_ERROR_ARCH_MISMATCH NVJITLINK_ERROR_INTERNAL +#define NVJITLINK_ERROR_OUTDATED_LIBRARY NVJITLINK_ERROR_INTERNAL +#define NVJITLINK_ERROR_MISSING_FATBIN NVJITLINK_ERROR_INVALID_INPUT +#endif + +/** + * + * \defgroup linking Linking + * + */ + +/** \ingroup linking + * + * \brief The enumerated type nvJitLinkInputType defines the kind of inputs + * that can be passed to nvJitLinkAdd* APIs. + */ + +typedef enum { + NVJITLINK_INPUT_NONE = 0, // error + NVJITLINK_INPUT_CUBIN = 1, + NVJITLINK_INPUT_PTX, + NVJITLINK_INPUT_LTOIR, + NVJITLINK_INPUT_FATBIN, + NVJITLINK_INPUT_OBJECT, + NVJITLINK_INPUT_LIBRARY, + NVJITLINK_INPUT_ANY = 10 // will dynamically determine one of above types +} nvJitLinkInputType; + +/** + * \defgroup options Supported Link Options + * + * nvJitLink supports the link options below. + * Option names are prefixed with a single dash (\c -). + * Options that take a value have an assignment operator (\c =) + * followed by the option value, with no spaces, e.g. \c "-arch=sm_90". + * + * The supported options are: + * - \c -arch=sm_ \n + * Pass SM architecture value. See nvcc for valid values of . + * Can use compute_ value instead if only generating PTX. + * This is a required option. + * - \c -maxrregcount= \n + * Maximum register count. + * - \c -time \n + * Print timing information to InfoLog. + * - \c -verbose \n + * Print verbose messages to InfoLog. + * - \c -lto \n + * Do link time optimization. + * - \c -ptx \n + * Emit ptx after linking instead of cubin; only supported with \c -lto + * - \c -O \n + * Optimization level. Only 0 and 3 are accepted. + * - \c -g \n + * Generate debug information. + * - \c -lineinfo \n + * Generate line information. + * - \c -ftz= \n + * Flush to zero. + * - \c -prec-div= \n + * Precise divide. + * - \c -prec-sqrt= \n + * Precise square root. + * - \c -fma= \n + * Fast multiply add. + * - \c -kernels-used= \n + * Pass list of kernels that are used; any not in the list can be removed. + * This option can be specified multiple times. + * - \c -variables-used= \n + * Pass list of variables that are used; any not in the list can be removed. + * This option can be specified multiple times. + * - \c -optimize-unused-variables \n + * Normally device code optimization is limited by not knowing what the + * host code references. With this option it can assume that if a variable + * is not referenced in device code then it can be removed. + * - \c -Xptxas= \n + * Pass to ptxas. This option can be called multiple times. 
+ * - \c -split-compile= \n + * Split compilation maximum thread count. Use 0 to use all available processors. + * Value of 1 disables split compilation (default). + * - \c -split-compile-extended= \n + * [Experimental] A more aggressive form of split compilation. + * Accepts a maximum thread count value. Use 0 to use all available processors. + * Value of 1 disables extended split compilation (default). + * - \c -jump-table-density= \n + * When doing LTO, specify the case density percentage in switch statements, + * and use it as a minimal threshold to determine whether jump table(brx.idx + * instruction) will be used to implement a switch statement. Default + * value is 101. The percentage ranges from 0 to 101 inclusively. + */ + +/** + * \ingroup linking + * \brief nvJitLinkHandle is the unit of linking, and an opaque handle for + * a program. + * + * To link inputs, an instance of nvJitLinkHandle must be created first with + * nvJitLinkCreate(). + */ + +typedef struct nvJitLink* nvJitLinkHandle; // opaque handle + +// For versioning we will have separate API version for each library version + +extern nvJitLinkResult __nvJitLinkCreate_12_4( + nvJitLinkHandle *handle, + uint32_t numOptions, + const char **options); +/** + * \ingroup linking + * \brief nvJitLinkCreate creates an instance of nvJitLinkHandle with the + * given input options, and sets the output parameter \p handle. + * + * \param [out] handle Address of nvJitLink handle. + * \param [in] numOptions Number of options passed. + * \param [in] options Array of size \p numOptions of option strings. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_UNRECOGNIZED_OPTION\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_MISSING_ARCH\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * It supports options listed in \ref options. + * + * \see nvJitLinkDestroy + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkCreate( + nvJitLinkHandle *handle, + uint32_t numOptions, + const char **options) +{ + return __nvJitLinkCreate_12_4 (handle, numOptions, options); +} +#endif + +extern nvJitLinkResult __nvJitLinkDestroy_12_4 (nvJitLinkHandle *handle); +/** + * \ingroup linking + * \brief nvJitLinkDestroy frees the memory associated with the given handle + * and sets it to NULL. + * + * \param [in] handle Address of nvJitLink handle. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkCreate + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkDestroy (nvJitLinkHandle *handle) +{ + return __nvJitLinkDestroy_12_4 (handle); +} +#endif + +extern nvJitLinkResult __nvJitLinkAddData_12_4( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const void *data, + size_t size, + const char *name); // name can be null +/** + * \ingroup linking + * \brief nvJitLinkAddData adds data image to the link. + * + * \param [in] handle nvJitLink handle. + * \param [in] inputType kind of input. + * \param [in] data pointer to data image in memory. + * \param [in] size size of the data. + * \param [in] name name of input object. 
+ * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkAddData( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const void *data, + size_t size, + const char *name) // name can be null +{ + return __nvJitLinkAddData_12_4 (handle, inputType, data, size, name); +} +#endif + +extern nvJitLinkResult __nvJitLinkAddFile_12_4( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const char *fileName); // includes path to file +/** + * \ingroup linking + * \brief nvJitLinkAddFile reads data from file and links it in. + * + * \param [in] handle nvJitLink handle. + * \param [in] inputType kind of input. + * \param [in] fileName name of file. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkAddFile( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const char *fileName) // includes path to file +{ + return __nvJitLinkAddFile_12_4 (handle, inputType, fileName); +} +#endif + +extern nvJitLinkResult __nvJitLinkComplete_12_4 (nvJitLinkHandle handle); +/** + * \ingroup linking + * \brief nvJitLinkComplete does the actual link. + * + * \param [in] handle nvJitLink handle. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkComplete (nvJitLinkHandle handle) +{ + return __nvJitLinkComplete_12_4 (handle); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedCubinSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedCubinSize gets the size of the linked cubin. + * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the linked cubin. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkGetLinkedCubin + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedCubinSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetLinkedCubinSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedCubin_12_4( + nvJitLinkHandle handle, + void *cubin); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedCubin gets the linked cubin. + * + * \param [in] handle nvJitLink handle. + * \param [out] cubin The linked cubin. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * User is responsible for allocating enough space to hold the \p cubin. 
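Taken together, the create/add/complete/get entry points above form the usual linking flow. A sketch under stated assumptions: the "-arch=sm_80" option and the NVJITLINK_INPUT_OBJECT input kind (one of the kinds enumerated earlier in this header) are placeholders, and error handling is reduced to early returns.

#include <stdlib.h>
#include <nvJitLink.h>

/* Returns 0 on success; *cubinOut then holds a buffer of *sizeOut bytes that
 * the caller must free(). */
int link_to_cubin(const char *objPath, void **cubinOut, size_t *sizeOut)
{
    nvJitLinkHandle h;
    const char *opts[] = { "-arch=sm_80" };   /* assumed target architecture */

    if (nvJitLinkCreate(&h, 1, opts) != NVJITLINK_SUCCESS)
        return -1;
    if (nvJitLinkAddFile(h, NVJITLINK_INPUT_OBJECT, objPath) != NVJITLINK_SUCCESS ||
        nvJitLinkComplete(h) != NVJITLINK_SUCCESS ||
        nvJitLinkGetLinkedCubinSize(h, sizeOut) != NVJITLINK_SUCCESS) {
        nvJitLinkDestroy(&h);
        return -1;
    }
    *cubinOut = malloc(*sizeOut);             /* caller allocates, per the note above */
    if (*cubinOut == NULL ||
        nvJitLinkGetLinkedCubin(h, *cubinOut) != NVJITLINK_SUCCESS) {
        free(*cubinOut);
        nvJitLinkDestroy(&h);
        return -1;
    }
    nvJitLinkDestroy(&h);
    return 0;
}

The same size-then-buffer two-step applies to the linked PTX and the error/info logs below.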
+ * \see nvJitLinkGetLinkedCubinSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedCubin( + nvJitLinkHandle handle, + void *cubin) +{ + return __nvJitLinkGetLinkedCubin_12_4 (handle, cubin); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedPtxSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedPtxSize gets the size of the linked ptx. + * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the linked PTX. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * Linked PTX is only available when using the \c -lto option. + * \see nvJitLinkGetLinkedPtx + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedPtxSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetLinkedPtxSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedPtx_12_4( + nvJitLinkHandle handle, + char *ptx); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedPtx gets the linked ptx. + * + * \param [in] handle nvJitLink handle. + * \param [out] ptx The linked PTX. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * Linked PTX is only available when using the \c -lto option. + * User is responsible for allocating enough space to hold the \p ptx. + * \see nvJitLinkGetLinkedPtxSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedPtx( + nvJitLinkHandle handle, + char *ptx) +{ + return __nvJitLinkGetLinkedPtx_12_4 (handle, ptx); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetErrorLogSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetErrorLogSize gets the size of the error log. + * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the error log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkGetErrorLog + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetErrorLogSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetErrorLogSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetErrorLog_12_4( + nvJitLinkHandle handle, + char *log); +/** + * \ingroup linking + * \brief nvJitLinkGetErrorLog puts any error messages in the log. + * + * \param [in] handle nvJitLink handle. + * \param [out] log The error log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * User is responsible for allocating enough space to hold the \p log. + * \see nvJitLinkGetErrorLogSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetErrorLog( + nvJitLinkHandle handle, + char *log) +{ + return __nvJitLinkGetErrorLog_12_4 (handle, log); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetInfoLogSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetInfoLogSize gets the size of the info log. 
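A sketch that prints whatever the linker reported after a failed step, using the error-log getters above and assuming the handle is still valid; whether the reported size already includes a terminator is not stated here, so one extra byte is reserved defensively.

#include <stdio.h>
#include <stdlib.h>
#include <nvJitLink.h>

static void print_link_errors(nvJitLinkHandle h)
{
    size_t n = 0;
    char *log;

    if (nvJitLinkGetErrorLogSize(h, &n) != NVJITLINK_SUCCESS || n == 0)
        return;
    log = (char *)malloc(n + 1);              /* +1 in case no terminator is counted */
    if (log == NULL)
        return;
    if (nvJitLinkGetErrorLog(h, log) == NVJITLINK_SUCCESS) {
        log[n] = '\0';
        fprintf(stderr, "nvJitLink error log:\n%s\n", log);
    }
    free(log);
}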
+ * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the info log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkGetInfoLog + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetInfoLogSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetInfoLogSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetInfoLog_12_4( + nvJitLinkHandle handle, + char *log); +/** + * \ingroup linking + * \brief nvJitLinkGetInfoLog puts any info messages in the log. + * + * \param [in] handle nvJitLink handle. + * \param [out] log The info log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * User is responsible for allocating enough space to hold the \p log. + * \see nvJitLinkGetInfoLogSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetInfoLog( + nvJitLinkHandle handle, + char *log) +{ + return __nvJitLinkGetInfoLog_12_4 (handle, log); +} +#endif + +/** + * \ingroup linking + * \brief nvJitLinkVersion returns the current version of nvJitLink. + * + * \param [out] major The major version. + * \param [out] minor The minor version. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + */ +extern nvJitLinkResult nvJitLinkVersion( + unsigned int *major, + unsigned int *minor); + +#ifdef __cplusplus +} +#endif + +#endif // nvJitLink_INCLUDED + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__init__.py b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ff7fdf26ea4e209fccfe6153170f4c5af748c87 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..de6328acdeecf7f3d1a27f667dd2a68296b65297 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbd1488bbef82b8d64c2dd51a9b6aef3f6b0bc3a7a80a9821be48d1c07f700e7 +size 53594512 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff 
--git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..9382bb3fea1153c53386720793963ac5e3f44f6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: nvidia-cuda-cupti-cu12 +Version: 12.1.105 +Summary: CUDA profiling tools runtime libs. +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt + +Provides libraries to enable third party tools using GPU profiling APIs. diff --git a/env-llmeval/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fc02fe41ceae6c1ad930a92a538eb9e43abad6fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:637166b60ae09b7100ccffcef4d75afbf9ab9a57562984b3f282134405a30498 +size 5042744