diff --git a/.gitattributes b/.gitattributes index e84f34c52b2854aea415585933d7534780c1e0d6..4ba9194928d6f069f000712e40dc6a349e89456b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -98,3 +98,4 @@ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2444f0a4a86ba0735e9dfc9e45c6d35d986d0a78 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71562a5ad43654177335690f7a2402b4e3ba2257 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h new file mode 100644 index 0000000000000000000000000000000000000000..5247c40807f0dd36a886513ab1bff5d2977364db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h @@ -0,0 +1,64 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "device_types.h" +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "driver_types.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "surface_types.h" +#include "texture_types.h" +#include "vector_types.h" diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h new file mode 100644 index 0000000000000000000000000000000000000000..c6f039db8effce996015f901562009ebe976d832 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h @@ -0,0 +1,588 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CHANNEL_DESCRIPTOR_H__) +#define __CHANNEL_DESCRIPTOR_H__ + +#if defined(__cplusplus) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +/** + * \addtogroup CUDART_HIGHLEVEL + * + * @{ + */ + +/** + * \brief \hl Returns a channel descriptor using the specified format + * + * Returns a channel descriptor with format \p f and number of bits of each + * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is + * defined as: + * \code + struct cudaChannelFormatDesc { + int x, y, z, w; + enum cudaChannelFormatKind f; + }; + * \endcode + * + * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned, + * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat, + * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2, + * ::cudaChannelFormatKindSignedNormalized8X4, + * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2, + * ::cudaChannelFormatKindUnsignedNormalized8X4, + * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2, + * ::cudaChannelFormatKindSignedNormalized16X4, + * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2, + * ::cudaChannelFormatKindUnsignedNormalized16X4 + * or ::cudaChannelFormatKindNV12. + * + * The format is specified by the template specialization. + * + * The template function specializes for the following scalar types: + * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float. 
+ * The template function specializes for the following vector types: + * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}. + * The template function specializes for following cudaChannelFormatKind enum values: + * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12. + * + * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone + * + * \return + * Channel descriptor with format \p f + * + * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)", + * ::cudaGetChannelDesc, + */ +template __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(char) * 8; + +#if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__) + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +#else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */ + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +#endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */ +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(signed char) * 8; + + return cudaCreateChannelDesc(e, e, e, 
e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned char) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned short) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(int) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned int) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +#if !defined(__LP64__) + +template<> __inline__ 
__host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(long) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(unsigned long) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned); +} + +#endif /* !__LP64__ */ + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + int e = (int)sizeof(float) * 8; + + return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat); +} + +static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void) +{ + int e = (int)sizeof(char) * 8; + + return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12); +} + +template __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone); +} + +/* Signed 8-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4); +} + +/* Unsigned 8-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, 
cudaChannelFormatKindUnsignedNormalized8X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4); +} + +/* Signed 16-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4); +} + +/* Unsigned 16-bit normalized integer formats */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2); +} + +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4); +} + +/* NV12 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12); +} + +/* BC1 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1); +} + +/* BC1sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB); +} + +/* BC2 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2); +} + +/* BC2sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB); +} + +/* BC3 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3); +} + +/* BC3sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB); +} + +/* BC4 unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4); +} + +/* BC4 signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4); +} + +/* BC5 unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, 
cudaChannelFormatKindUnsignedBlockCompressed5); +} + +/* BC5 signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5); +} + +/* BC6H unsigned format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H); +} + +/* BC6H signed format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H); +} + +/* BC7 format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7); +} + +/* BC7sRGB format */ +template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void) +{ + return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB); +} + +#endif /* __cplusplus */ + +/** @} */ +/** @} */ /* END CUDART_TEXTURE_HL */ + +#endif /* !__CHANNEL_DESCRIPTOR_H__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..5f8ea3d242640f2196b789c7da6c05d2ed1bed3e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h @@ -0,0 +1,65 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__ +#endif + +#include "crt/common_functions.h" + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__ +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h new file mode 100644 index 0000000000000000000000000000000000000000..1b7dcb2433f2cb7d1ef61290995ac871a901b1e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h @@ -0,0 +1,452 @@ +/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CG_ASYNC_H +#define _CG_ASYNC_H + +#include "helpers.h" +#include "info.h" + +#include + +_CG_BEGIN_NAMESPACE + +namespace details { +// Groups supported by memcpy_async +template +struct _async_copy_group_supported : public _CG_STL_NAMESPACE::false_type {}; + +template +struct _async_copy_group_supported> + : public _CG_STL_NAMESPACE::true_type {}; +template <> +struct _async_copy_group_supported : public _CG_STL_NAMESPACE::true_type {}; +template <> +struct _async_copy_group_supported : public _CG_STL_NAMESPACE::true_type {}; + +template +using async_copy_group_supported = _async_copy_group_supported>; + +// Groups that require optimization +template +struct _async_copy_optimize_tile : public _CG_STL_NAMESPACE::false_type {}; + +template +struct _async_copy_optimize_tile> + : public _CG_STL_NAMESPACE::false_type {}; + +template +struct _async_copy_optimize_tile> + : public _CG_STL_NAMESPACE::true_type {}; + +template +using async_copy_optimize_tile = _async_copy_optimize_tile>; + +// SFINAE helpers for tile optimizations +template +using enable_tile_optimization = + typename _CG_STL_NAMESPACE::enable_if::value, void *>::type; + +template +using disable_tile_optimization = + typename _CG_STL_NAMESPACE::enable_if::value, void *>::type; + +// Segment for punning to aligned types +template +struct _Segment { + int _seg[N]; +}; + +// Trivial layout guaranteed-aligned copy-async compatible segments +template +struct Segment; +template <> +struct __align__(4) Segment<1> : public _Segment<1>{}; +template <> +struct __align__(8) Segment<2> : public _Segment<2>{}; +template <> +struct __align__(16) Segment<4> : public _Segment<4>{}; + +// Interleaved element by element copies from source to dest +template +_CG_STATIC_QUALIFIER void inline_copy(TyGroup &group, TyElem *__restrict__ dst, const TyElem *__restrict__ src, + size_t count) { + const unsigned int rank = group.thread_rank(); + const unsigned int stride = group.size(); + + for (size_t idx = rank; idx < count; idx += stride) { + dst[idx] = src[idx]; + } +} + +template = nullptr> +_CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst, + const TyElem *__restrict__ src, size_t count) { + static_assert(async_copy_group_supported::value, + "Async copy is only 
supported for groups that represent private shared memory"); + + if (count == 0) { + return; + } + + const bool dstIsNotShared = !__isShared(dst); + const bool srcIsNotGlobal = !__isGlobal(src); + + if (dstIsNotShared || srcIsNotGlobal) { + inline_copy(group, dst, src, count); + return; + } + + const unsigned int stride = group.size(); + const unsigned int rank = group.thread_rank(); + // Efficient copies require warps to operate on the same amount of work at each step. + // remainders are handled in a separate stage to prevent branching + const unsigned int subWarpMask = (stride - 1); + const unsigned int subwarpCopies = (subWarpMask & (unsigned int)count); + const unsigned int maxSubwarpRank = min(rank, subwarpCopies - 1); + + const size_t warpCopies = (count & (~subWarpMask)); + + for (size_t idx = 0; idx < warpCopies; idx += stride) { + size_t _srcIdx = rank + idx; + size_t _dstIdx = rank + idx; + __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem)); + } + + if (subwarpCopies) { + size_t _srcIdx = warpCopies + maxSubwarpRank; + size_t _dstIdx = warpCopies + maxSubwarpRank; + __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem)); + } +} + +template = nullptr> +_CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst, + const TyElem *__restrict__ src, size_t count) { + static_assert(async_copy_group_supported::value, + "Async copy is only supported for groups that represent private shared memory"); + + const bool dstIsNotShared = !__isShared(dst); + const bool srcIsNotGlobal = !__isGlobal(src); + + if (dstIsNotShared || srcIsNotGlobal) { + inline_copy(group, dst, src, count); + return; + } + + unsigned int stride = group.size(); + unsigned int rank = group.thread_rank(); + + for (size_t idx = rank; idx < count; idx += stride) { + size_t _srcIdx = idx; + size_t _dstIdx = idx; + __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem)); + } +} + +// Determine best possible alignment given an input and initial conditions +// Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments +template +_CG_STATIC_QUALIFIER uint32_t find_best_alignment(void *__restrict__ dst, const void *__restrict__ src) { + // Narrowing conversion intentional + uint32_t base1 = (uint32_t) reinterpret_cast(src); + uint32_t base2 = (uint32_t) reinterpret_cast(dst); + + uint32_t diff = ((base1) ^ (base2)) & (MaxAlignment - 1); + + // range [MaxAlignment, alignof(elem)], step: x >> 1 + // over range of possible alignments, choose best available out of range + uint32_t out = MaxAlignment; +#pragma unroll + for (uint32_t alignment = (MaxAlignment >> 1); alignment >= MinAlignment; alignment >>= 1) { + if (alignment & diff) + out = alignment; + } + + return out; +} + +// Determine best possible alignment given an input and initial conditions +// Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments +template +_CG_STATIC_QUALIFIER void copy_like(const TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src, + size_t count) { + const char *src = reinterpret_cast(_src); + char *dst = reinterpret_cast(_dst); + + constexpr uint32_t targetAlignment = (uint32_t)alignof(TyType); + + uint32_t base = (uint32_t) reinterpret_cast(src); + uint32_t alignOffset = ((~base) + 1) & (targetAlignment - 1); + + inline_copy(group, dst, src, alignOffset); + count -= alignOffset; + src += alignOffset; + dst += alignOffset; + + // Copy using the best 
available alignment, async_copy expects n-datums, not bytes + size_t asyncCount = count / sizeof(TyType); + accelerated_async_copy(group, reinterpret_cast(dst), reinterpret_cast(src), asyncCount); + asyncCount *= sizeof(TyType); + + count -= asyncCount; + src += asyncCount; + dst += asyncCount; + inline_copy(group, dst, src, count); +} + +// We must determine alignment and manually align src/dst ourselves +template +struct _memcpy_async_align_dispatch { + template + _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ dst, const void *__restrict__ src, size_t count) { + uint32_t alignment = find_best_alignment(dst, src); + + // Avoid copying the extra bytes if desired copy count is smaller + alignment = count < alignment ? AlignHint : alignment; + + switch (alignment) { + default: + case 1: + inline_copy(group, reinterpret_cast(dst), reinterpret_cast(src), count); + break; + case 2: + inline_copy(group, reinterpret_cast(dst), reinterpret_cast(src), count >> 1); + break; + case 4: + copy_like>(group, dst, src, count); + break; + case 8: + copy_like>(group, dst, src, count); + break; + case 16: + copy_like>(group, dst, src, count); + break; + } + } +}; + +// Specialization for 4 byte alignments +template <> +struct _memcpy_async_align_dispatch<4> { + template + _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src, + size_t count) { + const Segment<1> *src = reinterpret_cast *>(_src); + Segment<1> *dst = reinterpret_cast *>(_dst); + + // Dispatch straight to aligned LDGSTS calls + accelerated_async_copy(group, dst, src, count / sizeof(*dst)); + } +}; + +// Specialization for 8 byte alignments +template <> +struct _memcpy_async_align_dispatch<8> { + template + _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src, + size_t count) { + const Segment<2> *src = reinterpret_cast *>(_src); + Segment<2> *dst = reinterpret_cast *>(_dst); + + // Dispatch straight to aligned LDGSTS calls + accelerated_async_copy(group, dst, src, count / sizeof(*dst)); + } +}; + +// Alignments over 16 are truncated to 16 and bypass alignment +// This is the highest performing memcpy available +template <> +struct _memcpy_async_align_dispatch<16> { + template + _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src, + size_t count) { + const Segment<4> *src = reinterpret_cast *>(_src); + Segment<4> *dst = reinterpret_cast *>(_dst); + + // Dispatch straight to aligned LDGSTS calls + accelerated_async_copy(group, dst, src, count / sizeof(*dst)); + } +}; + +// byte-wide API +template +_CG_STATIC_QUALIFIER void _memcpy_async_dispatch_to_aligned_copy(const TyGroup &group, void *__restrict__ _dst, + const void *__restrict__ _src, size_t count) { + static_assert(!(Alignment & (Alignment - 1)), "Known static alignment dispatch must be a power of 2"); + details::_memcpy_async_align_dispatch::copy(group, _dst, _src, count); +} + +// Internal dispatch APIs +// These deduce the alignments and sizes necessary to invoke the underlying copy engine +template +using is_void = _CG_STL_NAMESPACE::is_same; + +template +using enable_if_not_void = typename _CG_STL_NAMESPACE::enable_if::value, void *>::type; + +template +using enable_if_void = typename _CG_STL_NAMESPACE::enable_if::value, void *>::type; + +template +using enable_if_integral = + typename _CG_STL_NAMESPACE::enable_if<_CG_STL_NAMESPACE::is_integral::value, void *>::type; + +// byte-wide API using aligned_sized_t +template 
typename Alignment, size_t Hint> +_CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, void *__restrict__ _dst, + const void *__restrict__ _src, const Alignment &count) { + constexpr size_t _align = (Hint > 16) ? 16 : Hint; + + details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, (size_t)count); +} + +// byte-wide API using type for aligment +template = nullptr, enable_if_integral = nullptr> +_CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst, + const TyElem *__restrict__ _src, const TySize& count) { + constexpr size_t _align = (Hint > 16) ? 16 : Hint; + + details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, count); +} + +// byte-wide API with full alignment deduction required +template = nullptr, + enable_if_integral = nullptr> +_CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst, + const TyElem *__restrict__ _src, const TySize& count) { + details::_memcpy_async_dispatch_to_aligned_copy<1>(group, _dst, _src, count); +} + +// 1d-datum API +template +_CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const size_t dstCount, + const TyElem *__restrict__ src, const size_t srcCount) { + constexpr unsigned int _align = Hint; + const size_t totalCount = min(dstCount, srcCount) * sizeof(TyElem); + + details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount); +} + +// 1d-datum API using aligned_size_t +template typename Alignment, size_t Hint> +_CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const Alignment &dstCount, + const TyElem *__restrict__ src, const Alignment &srcCount) { + constexpr unsigned int _align = Hint; + const size_t totalCount = min((size_t)dstCount, (size_t)srcCount) * sizeof(TyElem); + + details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount); +} + +} // namespace details + +/* + * Group submit batch of async-copy to cover contiguous 1D array + * and commit that batch to eventually wait for completion. + */ +template +_CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ _dst, const TyElem *__restrict__ _src, + const TySizeT &count) { + details::_memcpy_async_bytes(group, _dst, _src, count); + __pipeline_commit(); +} + +/* + * Group submit batch of async-copy to cover contiguous 1D array + * and commit that batch to eventually wait for completion. + * Object counts are in datum sized chunks, not bytes. + */ +template +_CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ dst, const DstLayout &dstLayout, + const TyElem *__restrict__ src, const SrcLayout &srcLayout) { + details::_memcpy_async_datum(group, dst, dstLayout, src, srcLayout); + __pipeline_commit(); +} + +/* Group wait for prior Nth stage of memcpy_async to complete. */ +template +_CG_STATIC_QUALIFIER void wait_prior(const TyGroup &group) { + __pipeline_wait_prior(Stage); + group.sync(); +} + +/* Group wait all previously submitted memcpy_async to complete. 
*/ +template +_CG_STATIC_QUALIFIER void wait(const TyGroup &group) { + __pipeline_wait_prior(0); + group.sync(); +} + +/***************** CG APIs including pipeline are deprecated *****************/ + +/* Group submit batch of async-copy to cover of contiguous 1D array + to a pipeline and commit the batch*/ +template +_CG_DEPRECATED _CG_STATIC_QUALIFIER void memcpy_async(TyGroup &group, TyElem *dst, size_t dstCount, const TyElem *src, size_t srcCount, + nvcuda::experimental::pipeline &pipe) { + details::_memcpy_async_datum(group, dst, dstCount, src, srcCount); + pipe.commit(); +} + +/* Group wait for prior Nth stage of memcpy_async to complete. */ +template +_CG_DEPRECATED _CG_STATIC_QUALIFIER void wait_prior(TyGroup &group, nvcuda::experimental::pipeline &pipe) { + pipe.wait_prior(); + group.sync(); +} + +/* Group wait for stage-S of memcpy_async to complete. */ +template +_CG_DEPRECATED _CG_STATIC_QUALIFIER void wait(TyGroup &group, nvcuda::experimental::pipeline &pipe, size_t stage) { + pipe.wait(stage); + group.sync(); +} +_CG_END_NAMESPACE + +#endif // _CG_ASYNC_H diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..c3722fb5c22809027cee66ab05758e477e8ef2bf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h @@ -0,0 +1,108 @@ + /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CG_COALESCED_REDUCE_H_ +#define _CG_COALESCED_REDUCE_H_ + +#include "info.h" +#include "helpers.h" +#include "cooperative_groups.h" +#include "partitioning.h" +#include "coalesced_scan.h" + +_CG_BEGIN_NAMESPACE + +namespace details { + +template +_CG_QUALIFIER auto coalesced_reduce_to_one(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) { + if (group.size() == 32) { + auto out = val; + for (int offset = group.size() >> 1; offset > 0; offset >>= 1) { + out = op(out, group.shfl_up(out, offset)); + } + return out; + } + else { + auto scan_result = + inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); + return scan_result; + } +} + +template +_CG_QUALIFIER auto coalesced_reduce(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) { + auto out = coalesced_reduce_to_one(group, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); + if (group.size() == 32) { + return group.shfl(out, 31); + } + else { + unsigned int group_mask = _coalesced_group_data_access::get_mask(group); + unsigned int last_thread_id = 31 - __clz(group_mask); + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(out), group_mask, last_thread_id, 32); + } +} + +template +_CG_QUALIFIER auto coalesced_reduce(const __single_warp_thread_block_tile& group, + TyVal&& val, + TyOp&& op) -> decltype(op(val, val)) { + auto out = val; + for (int mask = TySize >> 1; mask > 0; mask >>= 1) { + out = op(out, group.shfl_xor(out, mask)); + } + + return out; +} + +} // details + +_CG_END_NAMESPACE + +#endif // _CG_COALESCED_REDUCE_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..383f4bde059dd8daad7d1c56e99152ea7ee28a08 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h @@ -0,0 +1,174 @@ +/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CG_COALESCED_SCAN_H_ +#define _CG_COALESCED_SCAN_H_ + +#include "info.h" +#include "helpers.h" +#include "cooperative_groups.h" +#include "partitioning.h" +#include "functional.h" + +_CG_BEGIN_NAMESPACE + +namespace details { + +template +_CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) { + auto out = val; + for (int mask = 1; mask < group.size(); mask <<= 1) { + auto tmp = group.shfl_up(out, mask); + if (mask <= group.thread_rank()) { + out = op(out, tmp); + } + } + + return out; +} + +template +_CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) { + const unsigned int groupSize = group.size(); + auto out = val; + + const unsigned int mask = details::_coalesced_group_data_access::get_mask(group); + unsigned int lanemask = details::lanemask32_lt() & mask; + unsigned int srcLane = details::laneid(); + + const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */ + const unsigned int rank = __popc(lanemask); + + for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) { + if (i <= rank) { + srcLane -= j; + j = i; /* maximum possible lane */ + + unsigned int begLane = base + rank - i; /* minimum possible lane */ + + /* Next source lane is in the range [ begLane .. srcLane ] + * If begLane < srcLane then do a binary search. 
+ */ + while (begLane < srcLane) { + const unsigned int halfLane = (begLane + srcLane) >> 1; + const unsigned int halfMask = lanemask >> halfLane; + const unsigned int d = __popc(halfMask); + if (d < i) { + srcLane = halfLane - 1; /* halfLane too large */ + } + else if ((i < d) || !(halfMask & 0x01)) { + begLane = halfLane + 1; /* halfLane too small */ + } + else { + begLane = srcLane = halfLane; /* happen to hit */ + } + } + } + + auto tmp = details::tile::shuffle_dispatch::shfl(out, mask, srcLane, 32); + if (i <= rank) { + out = op(out, tmp); + } + } + return out; +} + +template +_CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile& group, + TyVal&& val, + TyOp&& op) -> decltype(op(val, val)) { + return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); +} + +template +_CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) { + if (group.size() == 32) { + return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); + } + else { + return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); + } +} + +template +struct scan_choose_convertion; + +template<> +struct scan_choose_convertion { + template + _CG_STATIC_QUALIFIER details::remove_qual convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) { + return result - val; + } +}; + +template<> +struct scan_choose_convertion { + template + _CG_STATIC_QUALIFIER details::remove_qual convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) { + auto ret = group.shfl_up(result, 1); + if (group.thread_rank() == 0) { + return {}; + } + else { + return ret; + } + } +}; + +template +_CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) { + using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same, cooperative_groups::plus>>::value + && _CG_STL_NAMESPACE::is_integral>::value>; + return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward(val)); +} + +} // details + +_CG_END_NAMESPACE + +#endif // _CG_COALESCED_SCAN_H_ \ No newline at end of file diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..9c866fcf740beb709a106057d28e8a2a1ac37924 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h @@ -0,0 +1,99 @@ + /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
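A minimal usage sketch for the scan helpers shown above (illustrative only, not part of the vendored header; it assumes the public cg::inclusive_scan / cg::exclusive_scan entry points from <cooperative_groups/scan.h>, which dispatch into the contiguous and non-contiguous paths depending on the group type):

#include <cooperative_groups.h>
#include <cooperative_groups/scan.h>
namespace cg = cooperative_groups;

__global__ void scan_demo(const int *in, int *out) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    auto warp = cg::tiled_partition<32>(cg::this_thread_block());

    // Full 32-thread tile: takes the contiguous shfl_up-based helper.
    int warp_prefix = cg::inclusive_scan(warp, in[i], cg::plus<int>());

    if (in[i] > 0) {
        // Divergent subset: a coalesced_group with gaps in its mask, so the
        // scan falls back to the binary-search source-lane helper.
        cg::coalesced_group active = cg::coalesced_threads();
        out[i] = cg::inclusive_scan(active, in[i]);   // default op is cg::plus
    } else {
        out[i] = warp_prefix;
    }
}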
+ */ + +#ifndef _CG_DRIVER_API_H +#define _CG_DRIVER_API_H + +#include "info.h" + +_CG_BEGIN_NAMESPACE + +namespace details { + template + _CG_QUALIFIER unsigned int load_env_reg() { + // Abort by default + _CG_ABORT(); + return 0; + } + + template + _CG_QUALIFIER unsigned long long load_env_reg64() { + unsigned long long registerLo = load_env_reg(); + unsigned long long registerHi = load_env_reg(); + + return (registerHi << 32) | registerLo; + } + +// inline PTX for accessing registers requires an immediate for the special reg +# define LOAD_ENVREG(NUMBER) \ + template <> _CG_QUALIFIER unsigned int load_env_reg() { \ + unsigned int r; \ + asm ("mov.u32 %0, %%envreg" #NUMBER ";" : "=r"(r)); \ + return r; \ + } + + // Instantiate loaders for registers used + LOAD_ENVREG(0); + LOAD_ENVREG(1); + LOAD_ENVREG(2); +# undef LOAD_ENVREG + + struct grid_workspace { + unsigned int wsSize; + unsigned int barrier; + }; + + _CG_QUALIFIER grid_workspace* get_grid_workspace() { + unsigned long long gridWsAbiAddress = load_env_reg64<1, 2>(); + // Interpret the address from envreg 1 and 2 as the driver's grid workspace + return (reinterpret_cast(gridWsAbiAddress)); + } +} +_CG_END_NAMESPACE + +#endif // _CG_DRIVER_API_H diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..0f151fe2c270421ba56e22935e84c4bf93790eff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h @@ -0,0 +1,212 @@ + /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. 
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CG_FUNCTIONAL_H +#define _CG_FUNCTIONAL_H + +#include "info.h" +#include "helpers.h" + +#ifdef _CG_CPP11_FEATURES +#ifdef _CG_USE_CUDA_STL +# include +#endif + +_CG_BEGIN_NAMESPACE + +namespace details { +#ifdef _CG_USE_CUDA_STL + using cuda::std::plus; + using cuda::std::bit_and; + using cuda::std::bit_xor; + using cuda::std::bit_or; +#else + template struct plus {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 + arg2;}}; + template struct bit_and {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 & arg2;}}; + template struct bit_xor {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 ^ arg2;}}; + template struct bit_or {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 | arg2;}}; +#endif // _CG_USE_PLATFORM_STL +} // details + +template +struct plus : public details::plus {}; + +template +struct less { + __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const { + return (arg2 < arg1) ? arg2 : arg1; + } +}; + +template +struct greater { + __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const { + return (arg1 < arg2) ? 
arg2 : arg1; + } +}; + +template +struct bit_and : public details::bit_and {}; + +template +struct bit_xor : public details::bit_xor {}; + +template +struct bit_or : public details::bit_or {}; + +#if defined(_CG_HAS_STL_ATOMICS) +namespace details { + template + using _atomic_is_type_supported = _CG_STL_NAMESPACE::integral_constant::value && (sizeof(Ty) == 4 || sizeof(Ty) == 8)>; + + template struct _atomic_op_supported : public _CG_STL_NAMESPACE::false_type {}; + template struct _atomic_op_supported> : public _atomic_is_type_supported {}; + template struct _atomic_op_supported> : public _atomic_is_type_supported {}; + template struct _atomic_op_supported> : public _atomic_is_type_supported {}; + template struct _atomic_op_supported> : public _atomic_is_type_supported {}; + template struct _atomic_op_supported> : public _atomic_is_type_supported {}; + template struct _atomic_op_supported> : public _atomic_is_type_supported {}; + + template + _CG_QUALIFIER remove_qual atomic_cas_fallback(TyAtomic&& atomic, TyVal&& val, TyOp&& op) { + auto old = atomic.load(cuda::std::memory_order_relaxed); + while(!atomic.compare_exchange_weak(old, op(old, val), cuda::std::memory_order_relaxed)); + return old; + } + + template + struct op_picker; + + template + struct op_picker> { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) { + return atomic.fetch_add(val, cuda::std::memory_order_relaxed); + } + }; + + template + struct op_picker> { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) { + return atomic.fetch_min(val, cuda::std::memory_order_relaxed); + } + }; + + template + struct op_picker> { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) { + return atomic.fetch_max(val, cuda::std::memory_order_relaxed); + } + }; + + template + struct op_picker> { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) { + return atomic.fetch_and(val, cuda::std::memory_order_relaxed); + } + }; + + template + struct op_picker> { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) { + return atomic.fetch_xor(val, cuda::std::memory_order_relaxed); + } + }; + + template + struct op_picker> { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) { + return atomic.fetch_or(val, cuda::std::memory_order_relaxed); + } + }; + + template + struct atomic_update_dispatch {}; + + template<> + struct atomic_update_dispatch { + template + _CG_STATIC_QUALIFIER remove_qual atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) { + return atomic_cas_fallback(atomic, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); + } + }; + + template<> + struct atomic_update_dispatch { + template + _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val, TyOp&& op) { + using dispatch = op_picker>; + + return dispatch::atomic_update(atomic, val); + } + }; + + template + _CG_QUALIFIER remove_qual atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) { + using dispatch = atomic_update_dispatch<_atomic_op_supported>::value>; + + return dispatch::atomic_update(atomic, _CG_STL_NAMESPACE::forward(val), _CG_STL_NAMESPACE::forward(op)); + } + + template + _CG_QUALIFIER void atomic_store(TyAtomic& atomic, TyVal&& val) { + atomic.store(val, cuda::std::memory_order_relaxed); + } +} +#endif + +_CG_END_NAMESPACE + +#endif +#endif //_CG_FUNCTIONAL_H diff --git 
a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..5c76c2ee57feff78ee6189ffbcce2d728bf58edf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h @@ -0,0 +1,634 @@ + /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
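The grid workspace read from %envreg1/%envreg2 above is what grid_group::sync() consumes when a kernel is launched cooperatively. A hedged sketch of that usage (assumes a device reporting cooperativeLaunch support and the standard cudaLaunchCooperativeKernel host API):

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void two_phase(int *data, size_t n) {
    cg::grid_group grid = cg::this_grid();

    // Phase 1: every thread in the whole grid touches a strided range.
    for (unsigned long long i = grid.thread_rank(); i < n; i += grid.num_threads())
        data[i] += 1;

    grid.sync();   // valid only for cooperative launches; otherwise no workspace exists

    // Phase 2 can now safely read anything written in phase 1.
    for (unsigned long long i = grid.thread_rank(); i < n; i += grid.num_threads())
        data[i] *= 2;
}

// Host side (sketch):
//   void *args[] = { &d_data, &n };
//   cudaLaunchCooperativeKernel((void*)two_phase, dimGrid, dimBlock, args);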
+ */ + +#ifndef _COOPERATIVE_GROUPS_HELPERS_H_ +# define _COOPERATIVE_GROUPS_HELPERS_H_ + +#include "info.h" +#include "sync.h" + +_CG_BEGIN_NAMESPACE + +namespace details { +#ifdef _CG_CPP11_FEATURES + template struct _is_float_or_half : public _CG_STL_NAMESPACE::is_floating_point {}; +# ifdef _CG_HAS_FP16_COLLECTIVE + template <> struct _is_float_or_half<__half> : public _CG_STL_NAMESPACE::true_type {}; + template <> struct _is_float_or_half<__half2> : public _CG_STL_NAMESPACE::true_type {}; +# endif + template + using is_float_or_half = _is_float_or_half::type>; + + // Non-STL utility templates + template + using remove_qual = typename _CG_STL_NAMESPACE::remove_cv::type>::type; + + template + using is_op_type_same = _CG_STL_NAMESPACE::is_same, remove_qual + >; +#endif + + template + _CG_STATIC_QUALIFIER TyTrunc vec3_to_linear(dim3 index, dim3 nIndex) { + return ((TyTrunc)index.z * nIndex.y * nIndex.x) + + ((TyTrunc)index.y * nIndex.x) + + (TyTrunc)index.x; + } + + namespace cta { + + _CG_STATIC_QUALIFIER void sync() + { + __barrier_sync(0); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() + { + return static_cast(blockDim.x * blockDim.y * blockDim.z); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() + { + return vec3_to_linear(threadIdx, blockDim); + } + + _CG_STATIC_QUALIFIER dim3 group_index() + { + return dim3(blockIdx.x, blockIdx.y, blockIdx.z); + } + + _CG_STATIC_QUALIFIER dim3 thread_index() + { + return dim3(threadIdx.x, threadIdx.y, threadIdx.z); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() + { + return dim3(blockDim.x, blockDim.y, blockDim.z); + } + + // Legacy aliases + _CG_STATIC_QUALIFIER unsigned int size() + { + return num_threads(); + } + + _CG_STATIC_QUALIFIER dim3 block_dim() + { + return dim_threads(); + } + + }; + + class _coalesced_group_data_access { + public: + // Retrieve mask of coalesced groups and tiles + template + _CG_STATIC_QUALIFIER unsigned int get_mask(const TyGroup &group) { + return group.get_mask(); + } + + template + _CG_STATIC_QUALIFIER TyGroup construct_from_mask(unsigned int mask) { + return TyGroup(mask); + } + + template + _CG_STATIC_QUALIFIER void modify_meta_group(TyGroup &group, unsigned int mgRank, unsigned int mgSize) { + group._data.coalesced.metaGroupRank = mgRank; + group._data.coalesced.metaGroupSize = mgSize; + } + }; + + namespace tile { + template + struct _tile_helpers{ + _CG_STATIC_CONST_DECL unsigned int tileCount = TileCount; + _CG_STATIC_CONST_DECL unsigned int tileMask = TileMask; + _CG_STATIC_CONST_DECL unsigned int laneMask = LaneMask; + _CG_STATIC_CONST_DECL unsigned int shiftCount = ShiftCount; + }; + + template struct tile_helpers; + template <> struct tile_helpers<32> : public _tile_helpers<1, 0xFFFFFFFF, 0x1F, 5> {}; + template <> struct tile_helpers<16> : public _tile_helpers<2, 0x0000FFFF, 0x0F, 4> {}; + template <> struct tile_helpers<8> : public _tile_helpers<4, 0x000000FF, 0x07, 3> {}; + template <> struct tile_helpers<4> : public _tile_helpers<8, 0x0000000F, 0x03, 2> {}; + template <> struct tile_helpers<2> : public _tile_helpers<16, 0x00000003, 0x01, 1> {}; + template <> struct tile_helpers<1> : public _tile_helpers<32, 0x00000001, 0x00, 0> {}; + +#ifdef _CG_CPP11_FEATURES + namespace shfl { + /*********************************************************************************** + * Recursively Sliced Shuffle + * Purpose: + * Slices an input type a number of times into integral types so that shuffles + * are well defined + * Expectations: + * This object *should not* be used from a reinterpret_cast 
pointer unless + * some alignment guarantees can be met. Use a memcpy to guarantee that loads + * from the integral types stored within are aligned and correct. + **********************************************************************************/ + template + struct recursive_sliced_shuffle_helper; + + template + struct recursive_sliced_shuffle_helper { + int val; + + template + _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) { + val = shfl(val); + } + }; + + template + struct recursive_sliced_shuffle_helper { + int val; + recursive_sliced_shuffle_helper next; + + template + _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) { + val = shfl(val); + next.invoke_shuffle(shfl); + } + }; + } + + struct _memory_shuffle { + template + _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) { + static_assert(sizeof(TyElem) <= 32, "Cooperative groups collectives are limited to types smaller than 32B"); + return TyElem{}; + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) { + auto shfl = [=](int val) -> int { + return 0; + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) { + auto shfl = [=](int val) -> int { + return 0; + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) { + auto shfl = [=](int val) -> int { + return 0; + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) { + auto shfl = [=](int val) -> int { + return 0; + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + }; + + /*********************************************************************************** + * Intrinsic Device Function Shuffle + * Purpose: + * Uses a shuffle helper that has characteristics best suited for moving + * elements between threads + * Expectations: + * Object given will be forced into an l-value type so that it can be used + * with a helper structure that reinterprets the data into intrinsic compatible + * types + * Notes: + * !! 
TyRet is required so that objects are returned by value and not as + * dangling references depending on the value category of the passed object + **********************************************************************************/ + struct _intrinsic_compat_shuffle { + template + using shfl_helper = shfl::recursive_sliced_shuffle_helper; + + template + _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) { + static_assert(__is_trivially_copyable(TyElem), "Type is not compatible with device shuffle"); + shfl_helper helper; + memcpy(&helper, &elem, sizeof(TyElem)); + helper.invoke_shuffle(fn); + memcpy(&elem, &helper, sizeof(TyElem)); + return elem; + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) { + auto shfl = [=](int val) -> int { + return __shfl_sync(gMask, val, srcRank, threads); + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) { + auto shfl = [=](int val) -> int { + return __shfl_down_sync(gMask, val, delta, threads); + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) { + auto shfl = [=](int val) -> int { + return __shfl_up_sync(gMask, val, delta, threads); + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + + template > + _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) { + auto shfl = [=](int val) -> int { + return __shfl_xor_sync(gMask, val, lMask, threads); + }; + + return _shfl_internal(_CG_STL_NAMESPACE::forward(elem), shfl); + } + }; + + struct _native_shuffle { + template + _CG_STATIC_QUALIFIER TyElem shfl( + TyElem elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) { + return static_cast(__shfl_sync(gMask, elem, srcRank, threads)); + } + + template + _CG_STATIC_QUALIFIER TyElem shfl_down( + TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) { + return static_cast(__shfl_down_sync(gMask, elem, delta, threads)); + } + + template + _CG_STATIC_QUALIFIER TyElem shfl_up( + TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) { + return static_cast(__shfl_up_sync(gMask, elem, delta, threads)); + } + + template + _CG_STATIC_QUALIFIER TyElem shfl_xor( + TyElem elem, unsigned int gMask, unsigned int lMask, unsigned int threads) { + return static_cast(__shfl_xor_sync(gMask, elem, lMask, threads)); + } + }; + + // Almost all arithmetic types are supported by native shuffle + // Vector types are the exception + template + using use_native_shuffle = _CG_STL_NAMESPACE::integral_constant< + bool, + _CG_STL_NAMESPACE::is_integral< + remove_qual>::value || + details::is_float_or_half< + remove_qual>::value + >; + + constexpr unsigned long long _MemoryShuffleCutoff = 32; + + template ::value, + bool InMem = (sizeof(TyElem) > _MemoryShuffleCutoff)> + struct shuffle_dispatch; + + template + struct shuffle_dispatch : public _native_shuffle {}; + + template + struct shuffle_dispatch : public _intrinsic_compat_shuffle {}; + + template + struct shuffle_dispatch : public _memory_shuffle {}; + +#endif //_CG_CPP11_FEATURES + }; + + namespace multi_grid { + struct multi_grid_functions; + }; + + namespace grid { + _CG_STATIC_QUALIFIER void 
sync(unsigned int *bar) { + unsigned int expected = gridDim.x * gridDim.y * gridDim.z; + + details::sync_grids(expected, bar); + } + + _CG_STATIC_QUALIFIER unsigned long long num_blocks() + { + // grid.y * grid.z -> [max(65535) * max(65535)] fits within 4b, promote after multiplication + // grid.x * (grid.y * grid.z) -> [max(2^31-1) * max(65535 * 65535)] exceeds 4b, promote before multiplication + return (unsigned long long)gridDim.x * (gridDim.y * gridDim.z); + } + + _CG_STATIC_QUALIFIER unsigned long long num_threads() + { + return num_blocks() * cta::num_threads(); + } + + _CG_STATIC_QUALIFIER unsigned long long block_rank() + { + return vec3_to_linear(blockIdx, gridDim); + } + + _CG_STATIC_QUALIFIER unsigned long long thread_rank() + { + return block_rank() * cta::num_threads() + cta::thread_rank(); + } + + _CG_STATIC_QUALIFIER dim3 dim_blocks() + { + return dim3(gridDim.x, gridDim.y, gridDim.z); + } + + _CG_STATIC_QUALIFIER dim3 block_index() + { + return dim3(blockIdx.x, blockIdx.y, blockIdx.z); + } + +#if defined(_CG_HAS_CLUSTER_GROUP) + _CG_STATIC_QUALIFIER dim3 dim_clusters() { + return __clusterGridDimInClusters(); + } + + _CG_STATIC_QUALIFIER unsigned long long num_clusters() { + const dim3 dimClusters = dim_clusters(); + return dimClusters.x * dimClusters.y * dimClusters.z; + } + + _CG_STATIC_QUALIFIER dim3 cluster_index() { + return __clusterIdx(); + } + + _CG_STATIC_QUALIFIER unsigned long long cluster_rank() { + return vec3_to_linear(cluster_index(), dim_clusters()); + } +#endif + + // Legacy aliases + _CG_STATIC_QUALIFIER unsigned long long size() + { + return num_threads(); + } + + _CG_STATIC_QUALIFIER dim3 grid_dim() + { + return dim_blocks(); + } + }; + + +#if defined(_CG_HAS_MULTI_GRID_GROUP) + + namespace multi_grid { + _CG_STATIC_QUALIFIER unsigned long long get_intrinsic_handle() + { + return (cudaCGGetIntrinsicHandle(cudaCGScopeMultiGrid)); + } + + _CG_STATIC_QUALIFIER void sync(const unsigned long long handle) + { + cudaError_t err = cudaCGSynchronize(handle, 0); + } + + _CG_STATIC_QUALIFIER unsigned int size(const unsigned long long handle) + { + unsigned int numThreads = 0; + cudaCGGetSize(&numThreads, NULL, handle); + return numThreads; + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank(const unsigned long long handle) + { + unsigned int threadRank = 0; + cudaCGGetRank(&threadRank, NULL, handle); + return threadRank; + } + + _CG_STATIC_QUALIFIER unsigned int grid_rank(const unsigned long long handle) + { + unsigned int gridRank = 0; + cudaCGGetRank(NULL, &gridRank, handle); + return gridRank; + } + + _CG_STATIC_QUALIFIER unsigned int num_grids(const unsigned long long handle) + { + unsigned int numGrids = 0; + cudaCGGetSize(NULL, &numGrids, handle); + return numGrids; + } + +# ifdef _CG_CPP11_FEATURES + struct multi_grid_functions { + decltype(multi_grid::get_intrinsic_handle) *get_intrinsic_handle; + decltype(multi_grid::sync) *sync; + decltype(multi_grid::size) *size; + decltype(multi_grid::thread_rank) *thread_rank; + decltype(multi_grid::grid_rank) *grid_rank; + decltype(multi_grid::num_grids) *num_grids; + }; + + template + _CG_STATIC_QUALIFIER const multi_grid_functions* load_grid_intrinsics() { + __constant__ static const multi_grid_functions mgf { + &multi_grid::get_intrinsic_handle, + &multi_grid::sync, + &multi_grid::size, + &multi_grid::thread_rank, + &multi_grid::grid_rank, + &multi_grid::num_grids + }; + + return &mgf; + } +# endif + }; +#endif + +#if defined(_CG_HAS_CLUSTER_GROUP) + namespace cluster { + + _CG_STATIC_QUALIFIER bool isReal() + { 
+ return __clusterDimIsSpecified(); + } + + _CG_STATIC_QUALIFIER void barrier_arrive() + { + __cluster_barrier_arrive(); + } + + _CG_STATIC_QUALIFIER void barrier_wait() + { + __cluster_barrier_wait(); + } + + _CG_STATIC_QUALIFIER void sync() + { + barrier_arrive(); + barrier_wait(); + } + + _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr) + { + return __cluster_query_shared_rank(addr); + } + + template + _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank) + { + return static_cast(__cluster_map_shared_rank(addr, rank)); + } + + _CG_STATIC_QUALIFIER dim3 block_index() + { + return __clusterRelativeBlockIdx(); + } + + _CG_STATIC_QUALIFIER unsigned int block_rank() + { + return __clusterRelativeBlockRank(); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() + { + return block_rank() * cta::num_threads() + cta::thread_rank(); + } + + _CG_STATIC_QUALIFIER dim3 dim_blocks() + { + return __clusterDim(); + } + + _CG_STATIC_QUALIFIER unsigned int num_blocks() + { + return __clusterSizeInBlocks(); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() + { + const dim3 dimBlocks = dim_blocks(); + const unsigned int x = dimBlocks.x * blockDim.x; + const unsigned int y = dimBlocks.y * blockDim.y; + const unsigned int z = dimBlocks.z * blockDim.z; + return dim3(x, y, z); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() + { + return num_blocks() * cta::num_threads(); + } + + }; +#endif + + _CG_STATIC_QUALIFIER unsigned int laneid() + { + unsigned int laneid; + asm ("mov.u32 %0, %%laneid;" : "=r"(laneid)); + return laneid; + } + + _CG_STATIC_QUALIFIER unsigned int lanemask32_eq() + { + unsigned int lanemask32_eq; + asm ("mov.u32 %0, %%lanemask_eq;" : "=r"(lanemask32_eq)); + return (lanemask32_eq); + } + + _CG_STATIC_QUALIFIER unsigned int lanemask32_lt() + { + unsigned int lanemask32_lt; + asm ("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask32_lt)); + return (lanemask32_lt); + } + + _CG_STATIC_QUALIFIER void abort() + { + _CG_ABORT(); + } + + template + _CG_QUALIFIER void assert_if_not_arithmetic() { +#ifdef _CG_CPP11_FEATURES + static_assert( + _CG_STL_NAMESPACE::is_integral::value || + details::is_float_or_half::value, + "Error: Ty is neither integer or float" + ); +#endif //_CG_CPP11_FEATURES + } + +#ifdef _CG_CPP11_FEATURES + _CG_STATIC_QUALIFIER constexpr unsigned int log2(unsigned int x) { + return x == 1 ? 0 : 1 + log2(x / 2); + } +#endif //_CG_CPP11_FEATURES + +}; // !Namespace internal + +_CG_END_NAMESPACE + +#endif /* !_COOPERATIVE_GROUPS_HELPERS_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h new file mode 100644 index 0000000000000000000000000000000000000000..434f4334708af406045611dcbaeb4ea35a9f8e64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h @@ -0,0 +1,338 @@ + /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
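At the public API level, the shuffle_dispatch machinery above is what lets thread_block_tile::shfl() move small trivially-copyable structs as well as plain arithmetic values. An illustrative sketch (Pair is a made-up 8-byte type, so it goes through the intrinsic-compat path that slices it into two 32-bit shuffles; the indexing assumes one warp per block):

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

struct Pair { int key; float val; };   // trivially copyable, 8 bytes

__global__ void broadcast_pair(Pair *out) {
    auto warp = cg::tiled_partition<32>(cg::this_thread_block());

    Pair mine { (int)warp.thread_rank(), 0.5f * warp.thread_rank() };

    // int/float would use _native_shuffle; this struct is memcpy'd into the
    // recursive slice helper and moved with two __shfl_sync calls.
    Pair fromLane0 = warp.shfl(mine, 0);

    out[blockIdx.x * 32 + warp.thread_rank()] = fromLane0;   // assumes blockDim.x == 32
}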
+ * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + + + +#ifndef _CG_INFO_H_ +#define _CG_INFO_H_ +/* +** Define: _CG_VERSION +*/ +#define _CG_VERSION 1000 + +/* +** Define: _CG_ABI_VERSION +*/ +#ifndef _CG_ABI_VERSION +# define _CG_ABI_VERSION 1 +#endif + +/* +** Define: _CG_ABI_EXPERIMENTAL +** Desc: If enabled, sets all features enabled (ABI-breaking or experimental) +*/ +#if defined(_CG_ABI_EXPERIMENTAL) +#endif + +#define _CG_CONCAT_INNER(x, y) x ## y +#define _CG_CONCAT_OUTER(x, y) _CG_CONCAT_INNER(x, y) +#define _CG_NAMESPACE _CG_CONCAT_OUTER(__v, _CG_ABI_VERSION) + +#define _CG_BEGIN_NAMESPACE \ + namespace cooperative_groups { namespace _CG_NAMESPACE { +#define _CG_END_NAMESPACE \ + }; using namespace _CG_NAMESPACE; }; + +#if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1900)) +# define _CG_CPP11_FEATURES +#endif + +#if !defined(_CG_QUALIFIER) +# define _CG_QUALIFIER __forceinline__ __device__ +#endif +#if !defined(_CG_STATIC_QUALIFIER) +# define _CG_STATIC_QUALIFIER static __forceinline__ __device__ +#endif +#if !defined(_CG_CONSTEXPR_QUALIFIER) +# if defined(_CG_CPP11_FEATURES) +# define _CG_CONSTEXPR_QUALIFIER constexpr __forceinline__ __device__ +# else +# define _CG_CONSTEXPR_QUALIFIER _CG_QUALIFIER +# endif +#endif +#if !defined(_CG_STATIC_CONSTEXPR_QUALIFIER) +# if defined(_CG_CPP11_FEATURES) +# define _CG_STATIC_CONSTEXPR_QUALIFIER static constexpr __forceinline__ __device__ +# else +# define _CG_STATIC_CONSTEXPR_QUALIFIER _CG_STATIC_QUALIFIER +# endif +#endif + +#if defined(_MSC_VER) +# define _CG_DEPRECATED __declspec(deprecated) +#else +# define _CG_DEPRECATED __attribute__((deprecated)) +#endif + +#if (__CUDA_ARCH__ >= 600) || !defined(__CUDA_ARCH__) +# define _CG_HAS_GRID_GROUP +#endif +#if (__CUDA_ARCH__ >= 600) || !defined(__CUDA_ARCH__) +# define _CG_HAS_MULTI_GRID_GROUP +#endif +#if (__CUDA_ARCH__ >= 700) || !defined(__CUDA_ARCH__) +# define _CG_HAS_MATCH_COLLECTIVE +#endif + +#if (__CUDA_ARCH__ >= 800) || !defined(__CUDA_ARCH__) && (defined(__NVCC__) || defined(__CUDACC_RTC__)) +# define _CG_HAS_OP_REDUX +#endif + +#if ((__CUDA_ARCH__ >= 800) || !defined(__CUDA_ARCH__)) && !defined(_CG_USER_PROVIDED_SHARED_MEMORY) +# define _CG_HAS_RESERVED_SHARED +#endif + +#if ((__CUDA_ARCH__ >= 900) || !defined(__CUDA_ARCH__)) && \ + (defined(__NVCC__) || defined(__CUDACC_RTC__) || defined(_CG_CLUSTER_INTRINSICS_AVAILABLE)) && \ + defined(_CG_CPP11_FEATURES) +# define _CG_HAS_CLUSTER_GROUP +#endif + +#if (__CUDA_ARCH__ >= 900) || !defined(__CUDA_ARCH__) +# define _CG_HAS_INSTR_ELECT +#endif + +// Has __half and __half2 +// Only usable if you include the cuda_fp16.h extension, and +// _before_ including cooperative_groups.h +#ifdef __CUDA_FP16_TYPES_EXIST__ +# define _CG_HAS_FP16_COLLECTIVE +#endif + +// Include libcu++ where supported. 
+#if defined(_CG_CPP11_FEATURES) && !defined(__QNX__) && !defined(__ibmxl__) && \ + (defined(__NVCC__) || defined(__CUDACC_RTC__)) && \ + (defined(__x86_64__) || defined(__aarch64__) || defined(__ppc64__)|| defined(_M_X64) || defined(_M_ARM64)) && \ + (defined(_MSC_VER) || defined(__GNUC__) || defined(__clang__)) +# define _CG_USE_CUDA_STL +#else +# define _CG_USE_OWN_TRAITS +#endif + +#if defined(_CG_USE_CUDA_STL) && (!defined(__CUDA_ARCH__) || \ + ((!defined(_MSC_VER) && __CUDA_ARCH__ >= 600) || (defined(_MSC_VER) && __CUDA_ARCH__ >= 700))) +# define _CG_HAS_STL_ATOMICS +#endif + +#ifdef _CG_CPP11_FEATURES +// Use cuda::std:: for type_traits +# if defined(_CG_USE_CUDA_STL) +# define _CG_STL_NAMESPACE cuda::std +# include +// Use CG's implementation of type traits +# else +# define _CG_STL_NAMESPACE cooperative_groups::details::templates +# endif +#endif + +#ifdef _CG_CPP11_FEATURES +# define _CG_STATIC_CONST_DECL static constexpr +# define _CG_CONST_DECL constexpr +#else +# define _CG_STATIC_CONST_DECL static const +# define _CG_CONST_DECL const +#endif + +#if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__) +# define _CG_ASM_PTR_CONSTRAINT "r" +#else +# define _CG_ASM_PTR_CONSTRAINT "l" +#endif + +/* +** Define: CG_DEBUG +** What: Enables various runtime safety checks +*/ +#if defined(__CUDACC_DEBUG__) && defined(CG_DEBUG) && !defined(NDEBUG) +# define _CG_DEBUG +#endif + +#if defined(_CG_DEBUG) +# include +# define _CG_ASSERT(x) assert((x)); +# define _CG_ABORT() assert(0); +#else +# define _CG_ASSERT(x) +# define _CG_ABORT() __trap(); +#endif + +_CG_BEGIN_NAMESPACE + +namespace details { + _CG_STATIC_CONST_DECL unsigned int default_max_block_size = 1024; + +#if defined(_CG_CPP11_FEATURES) && !defined(_CG_USE_CUDA_STL) +namespace templates { + +/** + * Integral constants + **/ +template +struct integral_constant { + static constexpr Ty value = Val; + typedef Ty type; + + _CG_QUALIFIER constexpr operator type() const noexcept { return value; } + _CG_QUALIFIER constexpr type operator()() const noexcept { return value; } +}; + +typedef integral_constant true_type; +typedef integral_constant false_type; + +/** + * CV Qualifiers + **/ +template struct is_lvalue_reference : public details::templates::false_type {}; +template struct is_lvalue_reference : public details::templates::true_type {}; + +template struct remove_reference {typedef Ty type;}; +template struct remove_reference {typedef Ty type;}; +template struct remove_reference {typedef Ty type;}; + +template +using remove_reference_t = typename details::templates::remove_reference::type; + +template struct remove_const {typedef Ty type;}; +template struct remove_const {typedef Ty type;}; + +template struct remove_volatile {typedef Ty type;}; +template struct remove_volatile {typedef Ty type;}; + +template struct remove_cv {typedef typename details::templates::remove_volatile::type>::type type;}; + +template +using remove_cv_t = typename details::templates::remove_cv::type; + +template +_CG_QUALIFIER Ty&& forward(remove_reference_t &t) noexcept { + return static_cast(t); +} + +template +_CG_QUALIFIER Ty&& forward(remove_reference_t &&t) noexcept { + static_assert(!details::templates::is_lvalue_reference::value, "Forwarding an rvalue as an lvalue is not allowed."); + return static_cast(t); +} + +/** + * is_integral + **/ +template struct _is_integral : public details::templates::false_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public 
details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +template <> struct _is_integral : public details::templates::true_type {}; +//Vector type support? + +template +struct is_integral : public details::templates::_is_integral::type> {}; + +/** + * is_floating_point + **/ +template struct _is_floating_point : public details::templates::false_type {}; +template <> struct _is_floating_point : public details::templates::true_type {}; +template <> struct _is_floating_point : public details::templates::true_type {}; +template <> struct _is_floating_point : public details::templates::true_type {}; +# ifdef __CUDA_FP16_TYPES_EXIST__ +template <> struct _is_floating_point<__half> : public details::templates::true_type {}; +template <> struct _is_floating_point<__half2> : public details::templates::true_type {}; +# endif +//Vector type support? + +template +struct is_floating_point : public details::templates::_is_floating_point::type> {}; + +template +struct is_arithmetic : details::templates::integral_constant< + bool, + details::templates::is_integral::value || + details::templates::is_floating_point::value> {}; + +template ::value> +struct _is_unsigned : details::templates::integral_constant {}; + +template +struct _is_unsigned : details::templates::false_type {}; + +template +struct is_unsigned : _is_unsigned::type> {}; + +/** + * programmatic type traits + **/ +template +struct enable_if {}; + +template +struct enable_if { typedef Ty type; }; + +template +using enable_if_t = typename details::templates::enable_if::type; + +template +struct is_same : details::templates::false_type {}; + +template +struct is_same : details::templates::true_type {}; + +} // templates +#endif // _CG_CPP11_FEATURES + +} // details +_CG_END_NAMESPACE + + +#endif // _CG_INFO_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h new file mode 100644 index 0000000000000000000000000000000000000000..f00314ce140e390be90a1ab3c328fd73d73c0d46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h @@ -0,0 +1,189 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CG_INVOKE_H +#define _CG_INVOKE_H + +#include "info.h" +#include "helpers.h" + +#if defined(_CG_CPP11_FEATURES) + +_CG_BEGIN_NAMESPACE + +namespace details { + + template + struct _elect_group_supported : _CG_STL_NAMESPACE::false_type {}; +#ifdef _CG_HAS_INSTR_ELECT + template<> + struct _elect_group_supported : _CG_STL_NAMESPACE::true_type {}; + template + struct _elect_group_supported> : + _CG_STL_NAMESPACE::integral_constant {}; +#endif + + template + struct elect_group_supported : public _elect_group_supported> {}; + + template + _CG_STATIC_QUALIFIER bool elect_one(const Group& group, unsigned int mask, unsigned int& leader_lane) { + int is_leader = 0; +#ifdef _CG_HAS_INSTR_ELECT + asm("{\n\t" + " .reg .pred p;\n\t" + " elect.sync %0|p, %2;\n\t" + " @p mov.s32 %1, 1;\n\t" + "}" + : "+r"(leader_lane), "+r"(is_leader) : "r" (mask)); +#endif + return is_leader; + } + + template + struct invoke_one_impl {}; + + template<> + struct invoke_one_impl { + template + _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) { + auto mask = details::_coalesced_group_data_access::get_mask(group); + unsigned int leader_lane = 0; + + if (elect_one(group, mask, leader_lane)) { + _CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...); + } + } + + template + _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... 
args) + -> typename _CG_STL_NAMESPACE::remove_reference< + decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...))>::type { + + using ResultType = decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...)); + details::remove_qual result; + auto mask = details::_coalesced_group_data_access::get_mask(group); + unsigned int leader_lane = 0; + + if (elect_one(group, mask, leader_lane)) { + result = _CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...); + } + + // Need to use low level api instead of group.shfl, because elect_one returns lane id, not group rank. + return tile::shuffle_dispatch::shfl(result, mask, leader_lane, 32); + } + }; + + template<> + struct invoke_one_impl { + template + _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) { + if (group.thread_rank() == 0) { + _CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...); + } + } + + template + _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args) + -> typename _CG_STL_NAMESPACE::remove_reference< + decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...))>::type { + + using ResultType = decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...)); + details::remove_qual result; + + if (group.thread_rank() == 0) { + result = _CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...); + } + + return group.shfl(result, 0); + } + }; + + +}; // namespace details + +template +_CG_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) { + using impl = details::invoke_one_impl::value>; + impl::invoke_one(group, _CG_STL_NAMESPACE::forward(fn), _CG_STL_NAMESPACE::forward(args)...); +} + +template +_CG_QUALIFIER auto invoke_one_broadcast(const coalesced_group& group, Fn&& fn, Args&&... args) + -> typename _CG_STL_NAMESPACE::remove_reference< + decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...))>::type { + + using ResultType = decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...)); + static_assert(!_CG_STL_NAMESPACE::is_same::value, + "For invocables returning void invoke_one should be used instead"); + using impl = details::invoke_one_impl::value>; + return impl::invoke_one_broadcast(group, + _CG_STL_NAMESPACE::forward(fn), + _CG_STL_NAMESPACE::forward(args)...); +} + +template +_CG_QUALIFIER auto invoke_one_broadcast(const thread_block_tile& group, Fn&& fn, Args&&... 
args) + -> typename _CG_STL_NAMESPACE::remove_reference< + decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...))>::type { + + using ResultType = decltype(_CG_STL_NAMESPACE::forward(fn)(_CG_STL_NAMESPACE::forward(args)...)); + static_assert(!_CG_STL_NAMESPACE::is_same::value, + "For invocables returning void invoke_one should be used instead"); + using impl = details::invoke_one_impl>::value>; + return impl::invoke_one_broadcast(group, + _CG_STL_NAMESPACE::forward(fn), + _CG_STL_NAMESPACE::forward(args)...); +} + +_CG_END_NAMESPACE + +#endif //_CG_CPP11_FEATURES + +#endif // _CG_INVOKE_H diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..47cf260f3b4e0b29bf08c948697102bf027616db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h @@ -0,0 +1,135 @@ +/* Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
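A usage sketch for the invoke_one / invoke_one_broadcast entry points defined above (illustrative; assumes a CUDA 12 toolkit where they are public). One thread of the group is elected (elect.sync on sm_90+, thread_rank() == 0 otherwise), runs the callable, and the result is shuffled to every member:

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__device__ unsigned int g_counter;   // hypothetical global slot counter

__global__ void grab_slots(unsigned int *slot_out) {
    auto warp = cg::tiled_partition<32>(cg::this_thread_block());

    // One atomicAdd per warp instead of one per thread; every lane receives
    // the same base offset via the broadcast.
    unsigned int base = cg::invoke_one_broadcast(warp, [&] {
        return atomicAdd(&g_counter, warp.num_threads());
    });

    slot_out[blockIdx.x * blockDim.x + threadIdx.x] = base + warp.thread_rank();
}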
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _COOPERATIVE_GROUPS_MEMORY_H_ +# define _COOPERATIVE_GROUPS_MEMORY_H_ + +#include "info.h" + +_CG_BEGIN_NAMESPACE + +#if defined(_CG_CPP11_FEATURES) +namespace details { + _CG_STATIC_CONST_DECL int scratch_num_reserved_bytes = 12; + +#if defined(_CG_HAS_RESERVED_SHARED) + _CG_STATIC_QUALIFIER void* reserved_shared_ptr() + { + void *ptr; + asm ("{\n\t" + " .reg .u32 start;\n\t" + " .reg .u64 extended;\n\t" + " mov.u32 start, %%reserved_smem_offset_1;\n\t" + " cvt.u64.u32 extended, start;\n\t" + " cvta.shared.u64 %0, extended;\n\t" + "}" + : "=" _CG_ASM_PTR_CONSTRAINT(ptr)); + return ptr; + } +#endif + + struct multi_warp_scratch { + // One barrier per possible size of the group. + _CG_STATIC_CONST_DECL unsigned int memory_barriers_count = 5; + _CG_STATIC_CONST_DECL size_t sync_memory_size = memory_barriers_count * sizeof(barrier_t); + + using communication_type = unsigned long long; + _CG_STATIC_CONST_DECL size_t communication_size = sizeof(communication_type); + + // Layout of the scratch space: + barrier_t barriers[memory_barriers_count]; + char reserved[scratch_num_reserved_bytes]; // Reserve 12 bytes for future use + communication_type communication_memory[default_max_block_size / 32]; + + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int scratch_size_needed(unsigned int max_block_size) { + // One slot of collectives memory per warp. + return scratch_num_reserved_bytes + sync_memory_size + max_block_size / 32 * communication_size; + } + + _CG_QUALIFIER void init_barriers(unsigned int thread_rank) { + if (thread_rank < memory_barriers_count) { + barriers[thread_rank] = 0; + } + } + }; + +#if defined(_CG_HAS_RESERVED_SHARED) + // CG can expect at least 288 bytes available in reserved shared + static_assert(sizeof(multi_warp_scratch) <= 288, "multi-warp scratch size is too large"); +#endif + + // Make sure the structure can fit into the user provided memory + static_assert(sizeof(multi_warp_scratch) <= multi_warp_scratch::scratch_size_needed(default_max_block_size), + "multi-warp scratch size is too large"); + + + _CG_QUALIFIER multi_warp_scratch* get_scratch_ptr(void* user_scratch) { + void *ptr; +#if defined(_CG_HAS_RESERVED_SHARED) + ptr = reserved_shared_ptr(); +#else + ptr = user_scratch; +#endif + return static_cast(ptr); + + } + +} + +template +struct __align__(details::multi_warp_scratch::communication_size) block_tile_memory { +private: +#if !defined(_CG_HAS_RESERVED_SHARED) + char scratch[details::multi_warp_scratch::scratch_size_needed(MaxBlockSize)]; +#endif +}; +#endif + +_CG_END_NAMESPACE + +#endif /* !_COOPERATIVE_GROUPS_MEMORY_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h new file mode 100644 index 0000000000000000000000000000000000000000..c38418657d149e9527f9a01ce5a9f18e0f2bec61 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h @@ -0,0 +1,133 @@ +/* + * Copyright 1993-2016 NVIDIA Corporation. All rights reserved. 
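An illustrative sketch of how the block_tile_memory scratch declared above is typically supplied (assuming the single-MaxBlockSize form of the template shown here): tiles wider than a warp need the per-block barriers and communication slots unless the reserved-shared path is compiled in:

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void wide_tile_kernel(float *out) {
    // Backing storage for multi-warp tiles; effectively empty when
    // _CG_HAS_RESERVED_SHARED supplies the scratch instead.
    __shared__ cg::block_tile_memory<1024> scratch;

    cg::thread_block block = cg::this_thread_block(scratch);
    auto tile = cg::tiled_partition<64>(block);   // two-warp tile

    // Cross-warp shuffle goes through the communication_memory slots above.
    float v = tile.shfl((float)tile.thread_rank(), 0);
    out[blockIdx.x * blockDim.x + block.thread_rank()] = v;
}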
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CG_PARTITIONING_H +#define _CG_PARTITIONING_H + +#include "info.h" +#include "helpers.h" + +_CG_BEGIN_NAMESPACE + +namespace details { + + template + _CG_STATIC_QUALIFIER coalesced_group _binary_partition(const TyGroup &tile, bool pred) { + const unsigned int fullMask = ~0u; + + unsigned int thisMask = _coalesced_group_data_access::get_mask(tile); + unsigned int predMask = pred ? 
+        unsigned int setMask = __ballot_sync(thisMask, pred);
+
+        if (setMask == thisMask || setMask == 0) {
+            coalesced_group subTile = _coalesced_group_data_access::construct_from_mask(thisMask);
+            _coalesced_group_data_access::modify_meta_group(subTile, 0, 1);
+            return subTile;
+        }
+        else {
+            unsigned int subMask = thisMask & (setMask ^ predMask);
+            coalesced_group subTile = _coalesced_group_data_access::construct_from_mask(subMask);
+            _coalesced_group_data_access::modify_meta_group(subTile, pred, 2);
+            return subTile;
+        }
+    }
+
+#ifdef _CG_HAS_MATCH_COLLECTIVE
+    template <typename TyGroup, typename TyPredicate>
+    _CG_STATIC_QUALIFIER coalesced_group _labeled_partition(const TyGroup &tile, TyPredicate pred) {
+        unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
+        unsigned int thisBias = __ffs(thisMask) - 1; // Subtract 1 to index properly from [1-32]
+        unsigned int subMask = __match_any_sync(thisMask, pred);
+
+        coalesced_group subTile = _coalesced_group_data_access::construct_from_mask(subMask);
+
+        int leaderLaneId = subTile.shfl(details::laneid(), 0);
+
+        bool isLeader = !subTile.thread_rank();
+        unsigned int leaderMask = __ballot_sync(thisMask, isLeader);
+        unsigned int tileRank = __fns(leaderMask, leaderLaneId, 0) - thisBias;
+
+        _coalesced_group_data_access::modify_meta_group(subTile, tileRank, __popc(leaderMask));
+
+        return subTile;
+    }
+#endif
+}; // namespace details
+
+_CG_STATIC_QUALIFIER coalesced_group binary_partition(const coalesced_group &tile, bool pred) {
+    return details::_binary_partition(tile, pred);
+}
+
+template <unsigned int Size, typename ParentT>
+_CG_STATIC_QUALIFIER coalesced_group binary_partition(const thread_block_tile<Size, ParentT> &tile, bool pred) {
+#ifdef _CG_CPP11_FEATURES
+    static_assert(Size <= 32, "Binary partition is available only for tiles of size smaller or equal to 32");
+#endif
+    return details::_binary_partition(tile, pred);
+}
+
+
+#if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
+template <typename TyPredicate>
+_CG_STATIC_QUALIFIER coalesced_group labeled_partition(const coalesced_group &tile, TyPredicate pred) {
+    static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value, "labeled_partition predicate must be an integral type");
+    return details::_labeled_partition(tile, pred);
+}
+
+template <typename TyPredicate, unsigned int Size, typename ParentT>
+_CG_STATIC_QUALIFIER coalesced_group labeled_partition(const thread_block_tile<Size, ParentT> &tile, TyPredicate pred) {
+    static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value, "labeled_partition predicate must be an integral type");
+    static_assert(Size <= 32, "Labeled partition is available only for tiles of size smaller or equal to 32");
+    return details::_labeled_partition(tile, pred);
+}
+#endif
+
+_CG_END_NAMESPACE
+
+#endif // _CG_PARTITIONING_H
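Usage note (not part of the vendored header): binary_partition splits a warp-sized group into two coalesced_groups according to a per-thread predicate, and labeled_partition (where __match_any_sync is available) produces one group per distinct label. A short sketch follows; the kernel, input layout, and even/odd predicate are illustrative assumptions.

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

// Illustrative kernel: split each 32-thread tile by a runtime predicate and let
// every side continue as its own coalesced_group.
__global__ void partition_demo(const int *values, int *group_sizes) {
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);

    int v = values[block.thread_rank()];

    // Threads whose value is even land in one group, the rest in the other.
    cg::coalesced_group side = cg::binary_partition(warp, (v % 2) == 0);

    // meta_group_rank() reports which side this thread ended up on, and size()
    // is the number of threads that took the same branch.
    if (side.thread_rank() == 0) {
        // Two result slots per warp: one for each side of the partition.
        unsigned int warp_id = block.thread_rank() / 32;
        group_sizes[2 * warp_id + side.meta_group_rank()] = side.size();
    }

    // cg::labeled_partition(warp, v) would instead yield one coalesced_group per
    // distinct value of v, on architectures where _CG_HAS_MATCH_COLLECTIVE is set.
}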
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h
new file mode 100644
index 0000000000000000000000000000000000000000..cdc4a9cddcee3838b257a62fefd7823f5ad70c22
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h
@@ -0,0 +1,429 @@
+ /* Copyright 1993-2016 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _CG_REDUCE_H_
+#define _CG_REDUCE_H_
+
+#include "info.h"
+#include "helpers.h"
+#include "coalesced_reduce.h"
+#include "functional.h"
+#include "cooperative_groups.h"
+
+_CG_BEGIN_NAMESPACE
+
+namespace details {
+
+    template <typename Ty>
+    using _redux_is_add_supported = _CG_STL_NAMESPACE::integral_constant<
+        bool,
+        _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) <= 4)>;
+
+    template <typename Ty>
+    using redux_is_add_supported = _redux_is_add_supported<Ty>;
+
+    // A specialization for 64 bit logical operations is possible
+    // but for now only accelerate 32 bit bitwise ops
+    template <typename Ty>
+    using redux_is_logical_supported = redux_is_add_supported<Ty>;
+
+    // Base operator support case
+    template <class TyOp, typename Ty> struct _redux_op_supported : public _CG_STL_NAMESPACE::false_type {};
+#ifdef _CG_HAS_OP_REDUX
+    template <typename Ty> struct _redux_op_supported<cooperative_groups::plus<Ty>, Ty>    : public redux_is_add_supported<Ty> {};
+    template <typename Ty> struct _redux_op_supported<cooperative_groups::less<Ty>, Ty>    : public redux_is_add_supported<Ty> {};
+    template <typename Ty> struct _redux_op_supported<cooperative_groups::greater<Ty>, Ty> : public redux_is_add_supported<Ty> {};
+    template <typename Ty> struct _redux_op_supported<cooperative_groups::bit_and<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
+    template <typename Ty> struct _redux_op_supported<cooperative_groups::bit_or<Ty>, Ty>  : public redux_is_logical_supported<Ty> {};
+    template <typename Ty> struct _redux_op_supported<cooperative_groups::bit_xor<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
+#endif
+
+    template <typename Ty, template <class> class TyOp>
+    using redux_op_supported = _redux_op_supported<
+        typename details::remove_qual<TyOp<Ty>>,
+        Ty>;
+
+    // Groups smaller than 16 actually have worse performance characteristics when used with redux
+    // tiles of size 16 and 32 perform the same or better and have better code generation profiles
+    template <typename TyGroup> struct _redux_group_optimized : public _CG_STL_NAMESPACE::false_type {};
+
+    template <unsigned int Sz, typename TyPar>
+    struct _redux_group_optimized<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
+                                                                                          bool,
+                                                                                          (Sz >= 16)> {};
+    template <unsigned int Sz, typename TyPar>
+    struct _redux_group_optimized<internal_thread_block_tile<Sz, TyPar>>            : public _CG_STL_NAMESPACE::integral_constant<
+                                                                                          bool,
+                                                                                          (Sz >= 16)> {};
+    template <>
+    struct _redux_group_optimized<cooperative_groups::coalesced_group>              : public _CG_STL_NAMESPACE::true_type {};
+
+    template <typename TyGroup>
+    using redux_group_optimized = _redux_group_optimized<details::remove_qual<TyGroup>>;
+
+    template